diff --git a/.azure-pipelines/azure-pipelines.yml b/.azure-pipelines/azure-pipelines.yml index 3816369497..e9bfa6f8e4 100644 --- a/.azure-pipelines/azure-pipelines.yml +++ b/.azure-pipelines/azure-pipelines.yml @@ -29,22 +29,20 @@ schedules: always: true branches: include: - - stable-6 - - stable-5 + - stable-11 + - stable-10 - cron: 0 11 * * 0 displayName: Weekly (old stable branches) always: true branches: include: - - stable-4 + - stable-9 variables: - name: checkoutPath value: ansible_collections/community/general - name: coverageBranches value: main - - name: pipelinesCoverage - value: coverage - name: entryPoint value: tests/utils/shippable/shippable.sh - name: fetchDepth @@ -53,7 +51,7 @@ variables: resources: containers: - container: default - image: quay.io/ansible/azure-pipelines-test-container:3.0.0 + image: quay.io/ansible/azure-pipelines-test-container:7.0.0 pool: Standard @@ -72,54 +70,40 @@ stages: - test: 2 - test: 3 - test: 4 - - test: extra - - stage: Sanity_2_14 - displayName: Sanity 2.14 + - stage: Sanity_2_20 + displayName: Sanity 2.20 dependsOn: [] jobs: - template: templates/matrix.yml parameters: nameFormat: Test {0} - testFormat: 2.14/sanity/{0} + testFormat: 2.20/sanity/{0} targets: - test: 1 - test: 2 - test: 3 - test: 4 - - stage: Sanity_2_13 - displayName: Sanity 2.13 + - stage: Sanity_2_19 + displayName: Sanity 2.19 dependsOn: [] jobs: - template: templates/matrix.yml parameters: nameFormat: Test {0} - testFormat: 2.13/sanity/{0} + testFormat: 2.19/sanity/{0} targets: - test: 1 - test: 2 - test: 3 - test: 4 - - stage: Sanity_2_12 - displayName: Sanity 2.12 + - stage: Sanity_2_18 + displayName: Sanity 2.18 dependsOn: [] jobs: - template: templates/matrix.yml parameters: nameFormat: Test {0} - testFormat: 2.12/sanity/{0} - targets: - - test: 1 - - test: 2 - - test: 3 - - test: 4 - - stage: Sanity_2_11 - displayName: Sanity 2.11 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Test {0} - testFormat: 
2.11/sanity/{0} + testFormat: 2.18/sanity/{0} targets: - test: 1 - test: 2 @@ -135,58 +119,48 @@ stages: nameFormat: Python {0} testFormat: devel/units/{0}/1 targets: - - test: 2.7 - - test: 3.5 - - test: 3.6 - - test: 3.7 - - test: 3.8 - test: 3.9 - test: '3.10' - test: '3.11' - - stage: Units_2_14 - displayName: Units 2.14 + - test: '3.12' + - test: '3.13' + - test: '3.14' + - stage: Units_2_20 + displayName: Units 2.20 dependsOn: [] jobs: - template: templates/matrix.yml parameters: nameFormat: Python {0} - testFormat: 2.14/units/{0}/1 + testFormat: 2.20/units/{0}/1 targets: - - test: 2.7 - test: 3.9 - - stage: Units_2_13 - displayName: Units 2.13 + - test: "3.12" + - test: "3.14" + - stage: Units_2_19 + displayName: Units 2.19 dependsOn: [] jobs: - template: templates/matrix.yml parameters: nameFormat: Python {0} - testFormat: 2.13/units/{0}/1 + testFormat: 2.19/units/{0}/1 targets: - - test: 2.7 - test: 3.8 - - stage: Units_2_12 - displayName: Units 2.12 + - test: "3.11" + - test: "3.13" + - stage: Units_2_18 + displayName: Units 2.18 dependsOn: [] jobs: - template: templates/matrix.yml parameters: nameFormat: Python {0} - testFormat: 2.12/units/{0}/1 + testFormat: 2.18/units/{0}/1 targets: - - test: 2.6 - test: 3.8 - - stage: Units_2_11 - displayName: Units 2.11 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.11/units/{0}/1 - targets: - - test: 2.7 - - test: 3.5 + - test: "3.11" + - test: "3.13" ## Remote - stage: Remote_devel_extra_vms @@ -197,14 +171,14 @@ stages: parameters: testFormat: devel/{0} targets: - - name: Alpine 3.17 - test: alpine/3.17 - # - name: Fedora 37 - # test: fedora/37 - # - name: Ubuntu 20.04 - # test: ubuntu/20.04 + - name: Alpine 3.22 + test: alpine/3.22 + # - name: Fedora 42 + # test: fedora/42 - name: Ubuntu 22.04 test: ubuntu/22.04 + - name: Ubuntu 24.04 + test: ubuntu/24.04 groups: - vm - stage: Remote_devel @@ -215,82 +189,68 @@ stages: parameters: testFormat: 
devel/{0} targets: - - name: macOS 12.0 - test: macos/12.0 - - name: RHEL 7.9 - test: rhel/7.9 - - name: RHEL 9.1 - test: rhel/9.1 - - name: FreeBSD 13.1 - test: freebsd/13.1 - - name: FreeBSD 12.4 - test: freebsd/12.4 + - name: macOS 15.3 + test: macos/15.3 + - name: RHEL 10.0 + test: rhel/10.0 + - name: RHEL 9.6 + test: rhel/9.6 + - name: FreeBSD 14.3 + test: freebsd/14.3 + - name: FreeBSD 13.5 + test: freebsd/13.5 groups: - 1 - 2 - 3 - - stage: Remote_2_14 - displayName: Remote 2.14 + - stage: Remote_2_20 + displayName: Remote 2.20 dependsOn: [] jobs: - template: templates/matrix.yml parameters: - testFormat: 2.14/{0} + testFormat: 2.20/{0} targets: - - name: RHEL 9.0 - test: rhel/9.0 - - name: FreeBSD 12.3 - test: freebsd/12.3 + - name: RHEL 10.0 + test: rhel/10.0 + - name: FreeBSD 14.3 + test: freebsd/14.3 groups: - 1 - 2 - 3 - - stage: Remote_2_13 - displayName: Remote 2.13 + - stage: Remote_2_19 + displayName: Remote 2.19 dependsOn: [] jobs: - template: templates/matrix.yml parameters: - testFormat: 2.13/{0} + testFormat: 2.19/{0} targets: - - name: macOS 12.0 - test: macos/12.0 - - name: RHEL 8.5 - test: rhel/8.5 + - name: RHEL 9.5 + test: rhel/9.5 + - name: RHEL 10.0 + test: rhel/10.0 + - name: FreeBSD 14.2 + test: freebsd/14.2 groups: - 1 - 2 - 3 - - stage: Remote_2_12 - displayName: Remote 2.12 + - stage: Remote_2_18 + displayName: Remote 2.18 dependsOn: [] jobs: - template: templates/matrix.yml parameters: - testFormat: 2.12/{0} + testFormat: 2.18/{0} targets: - - name: macOS 11.1 - test: macos/11.1 - - name: RHEL 8.4 - test: rhel/8.4 - - name: FreeBSD 13.0 - test: freebsd/13.0 - groups: - - 1 - - 2 - - 3 - - stage: Remote_2_11 - displayName: Remote 2.11 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - testFormat: 2.11/{0} - targets: - - name: RHEL 7.9 - test: rhel/7.9 - - name: RHEL 8.3 - test: rhel/8.3 + - name: macOS 14.3 + test: macos/14.3 + - name: RHEL 9.4 + test: rhel/9.4 + - name: FreeBSD 14.1 + test: freebsd/14.1 
groups: - 1 - 2 @@ -305,86 +265,64 @@ stages: parameters: testFormat: devel/linux/{0} targets: - - name: CentOS 7 - test: centos7 - - name: Fedora 37 - test: fedora37 - - name: openSUSE 15 - test: opensuse15 - - name: Ubuntu 20.04 - test: ubuntu2004 + - name: Fedora 42 + test: fedora42 + - name: Alpine 3.22 + test: alpine322 - name: Ubuntu 22.04 test: ubuntu2204 - - name: Alpine 3 - test: alpine3 + - name: Ubuntu 24.04 + test: ubuntu2404 groups: - 1 - 2 - 3 - - stage: Docker_2_14 - displayName: Docker 2.14 + - stage: Docker_2_20 + displayName: Docker 2.20 dependsOn: [] jobs: - template: templates/matrix.yml parameters: - testFormat: 2.14/linux/{0} + testFormat: 2.20/linux/{0} targets: - - name: Fedora 36 - test: fedora36 + - name: Fedora 42 + test: fedora42 + - name: Alpine 3.22 + test: alpine322 groups: - 1 - 2 - 3 - - stage: Docker_2_13 - displayName: Docker 2.13 + - stage: Docker_2_19 + displayName: Docker 2.19 dependsOn: [] jobs: - template: templates/matrix.yml parameters: - testFormat: 2.13/linux/{0} + testFormat: 2.19/linux/{0} targets: - - name: Fedora 35 - test: fedora35 - - name: openSUSE 15 py2 - test: opensuse15py2 - - name: Alpine 3 - test: alpine3 + - name: Fedora 41 + test: fedora41 + - name: Alpine 3.21 + test: alpine321 groups: - 1 - 2 - 3 - - stage: Docker_2_12 - displayName: Docker 2.12 + - stage: Docker_2_18 + displayName: Docker 2.18 dependsOn: [] jobs: - template: templates/matrix.yml parameters: - testFormat: 2.12/linux/{0} + testFormat: 2.18/linux/{0} targets: - - name: CentOS 6 - test: centos6 - - name: Fedora 34 - test: fedora34 - - name: Ubuntu 18.04 - test: ubuntu1804 - groups: - - 1 - - 2 - - 3 - - stage: Docker_2_11 - displayName: Docker 2.11 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - testFormat: 2.11/linux/{0} - targets: - - name: Fedora 32 - test: fedora32 - - name: Fedora 33 - test: fedora33 - - name: Alpine 3 - test: alpine3 + - name: Fedora 40 + test: fedora40 + - name: Alpine 3.20 + test: alpine320 
+ - name: Ubuntu 24.04 + test: ubuntu2404 groups: - 1 - 2 @@ -399,101 +337,92 @@ stages: parameters: testFormat: devel/linux-community/{0} targets: - - name: Debian Bullseye + - name: Debian 11 Bullseye test: debian-bullseye/3.9 + - name: Debian 12 Bookworm + test: debian-bookworm/3.11 + - name: Debian 13 Trixie + test: debian-13-trixie/3.13 - name: ArchLinux - test: archlinux/3.10 - - name: CentOS Stream 8 - test: centos-stream8/3.9 + test: archlinux/3.13 groups: - 1 - 2 - 3 ### Generic - - stage: Generic_devel - displayName: Generic devel - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: devel/generic/{0}/1 - targets: - - test: 2.7 - - test: '3.11' - - stage: Generic_2_14 - displayName: Generic 2.14 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.14/generic/{0}/1 - targets: - - test: '3.10' - - stage: Generic_2_13 - displayName: Generic 2.13 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.13/generic/{0}/1 - targets: - - test: 3.9 - - stage: Generic_2_12 - displayName: Generic 2.12 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.12/generic/{0}/1 - targets: - - test: 3.8 - - stage: Generic_2_11 - displayName: Generic 2.11 - dependsOn: [] - jobs: - - template: templates/matrix.yml - parameters: - nameFormat: Python {0} - testFormat: 2.11/generic/{0}/1 - targets: - - test: 2.7 - - test: 3.5 +# Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled. 
+# - stage: Generic_devel +# displayName: Generic devel +# dependsOn: [] +# jobs: +# - template: templates/matrix.yml +# parameters: +# nameFormat: Python {0} +# testFormat: devel/generic/{0}/1 +# targets: +# - test: '3.9' +# - test: '3.12' +# - test: '3.14' +# - stage: Generic_2_20 +# displayName: Generic 2.20 +# dependsOn: [] +# jobs: +# - template: templates/matrix.yml +# parameters: +# nameFormat: Python {0} +# testFormat: 2.20/generic/{0}/1 +# targets: +# - test: '3.10' +# - test: '3.14' +# - stage: Generic_2_19 +# displayName: Generic 2.19 +# dependsOn: [] +# jobs: +# - template: templates/matrix.yml +# parameters: +# nameFormat: Python {0} +# testFormat: 2.19/generic/{0}/1 +# targets: +# - test: '3.9' +# - test: '3.13' +# - stage: Generic_2_18 +# displayName: Generic 2.18 +# dependsOn: [] +# jobs: +# - template: templates/matrix.yml +# parameters: +# nameFormat: Python {0} +# testFormat: 2.18/generic/{0}/1 +# targets: +# - test: '3.8' +# - test: '3.13' - stage: Summary condition: succeededOrFailed() dependsOn: - Sanity_devel - - Sanity_2_11 - - Sanity_2_12 - - Sanity_2_13 - - Sanity_2_14 + - Sanity_2_20 + - Sanity_2_19 + - Sanity_2_18 - Units_devel - - Units_2_11 - - Units_2_12 - - Units_2_13 - - Units_2_14 + - Units_2_20 + - Units_2_19 + - Units_2_18 - Remote_devel_extra_vms - Remote_devel - - Remote_2_11 - - Remote_2_12 - - Remote_2_13 - - Remote_2_14 + - Remote_2_20 + - Remote_2_19 + - Remote_2_18 - Docker_devel - - Docker_2_11 - - Docker_2_12 - - Docker_2_13 - - Docker_2_14 + - Docker_2_20 + - Docker_2_19 + - Docker_2_18 - Docker_community_devel # Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled. 
# - Generic_devel -# - Generic_2_11 -# - Generic_2_12 -# - Generic_2_13 -# - Generic_2_14 +# - Generic_2_20 +# - Generic_2_19 +# - Generic_2_18 jobs: - template: templates/coverage.yml diff --git a/.azure-pipelines/templates/coverage.yml b/.azure-pipelines/templates/coverage.yml index 3c8841aa26..1bf17e053a 100644 --- a/.azure-pipelines/templates/coverage.yml +++ b/.azure-pipelines/templates/coverage.yml @@ -28,16 +28,6 @@ jobs: - bash: .azure-pipelines/scripts/report-coverage.sh displayName: Generate Coverage Report condition: gt(variables.coverageFileCount, 0) - - task: PublishCodeCoverageResults@1 - inputs: - codeCoverageTool: Cobertura - # Azure Pipelines only accepts a single coverage data file. - # That means only Python or PowerShell coverage can be uploaded, but not both. - # Set the "pipelinesCoverage" variable to determine which type is uploaded. - # Use "coverage" for Python and "coverage-powershell" for PowerShell. - summaryFileLocation: "$(outputPath)/reports/$(pipelinesCoverage).xml" - displayName: Publish to Azure Pipelines - condition: gt(variables.coverageFileCount, 0) - bash: .azure-pipelines/scripts/publish-codecov.py "$(outputPath)" displayName: Publish to codecov.io condition: gt(variables.coverageFileCount, 0) diff --git a/.azure-pipelines/templates/matrix.yml b/.azure-pipelines/templates/matrix.yml index 4876375855..49f5d8595a 100644 --- a/.azure-pipelines/templates/matrix.yml +++ b/.azure-pipelines/templates/matrix.yml @@ -50,11 +50,11 @@ jobs: parameters: jobs: - ${{ if eq(length(parameters.groups), 0) }}: - - ${{ each target in parameters.targets }}: - - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }} - test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }} - - ${{ if not(eq(length(parameters.groups), 0)) }}: - - ${{ each group in parameters.groups }}: - ${{ each target in parameters.targets }}: - - name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), 
coalesce(target.name, target.test), group) }} - test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }} + - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }} + test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }} + - ${{ if not(eq(length(parameters.groups), 0)) }}: + - ${{ each group in parameters.groups }}: + - ${{ each target in parameters.targets }}: + - name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }} + test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }} diff --git a/.azure-pipelines/templates/test.yml b/.azure-pipelines/templates/test.yml index 700cf629d7..b263379c06 100644 --- a/.azure-pipelines/templates/test.yml +++ b/.azure-pipelines/templates/test.yml @@ -14,37 +14,37 @@ parameters: jobs: - ${{ each job in parameters.jobs }}: - - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }} - displayName: ${{ job.name }} - container: default - workspace: - clean: all - steps: - - checkout: self - fetchDepth: $(fetchDepth) - path: $(checkoutPath) - - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)" - displayName: Run Tests - - bash: .azure-pipelines/scripts/process-results.sh - condition: succeededOrFailed() - displayName: Process Results - - bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)" - condition: eq(variables.haveCoverageData, 'true') - displayName: Aggregate Coverage Data - - task: PublishTestResults@2 - condition: eq(variables.haveTestResults, 'true') - inputs: - testResultsFiles: "$(outputPath)/junit/*.xml" - displayName: Publish Test Results - - task: PublishPipelineArtifact@1 - condition: eq(variables.haveBotResults, 'true') - displayName: Publish Bot Results - inputs: - targetPath: 
"$(outputPath)/bot/" - artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)" - - task: PublishPipelineArtifact@1 - condition: eq(variables.haveCoverageData, 'true') - displayName: Publish Coverage Data - inputs: - targetPath: "$(Agent.TempDirectory)/coverage/" - artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)" + - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }} + displayName: ${{ job.name }} + container: default + workspace: + clean: all + steps: + - checkout: self + fetchDepth: $(fetchDepth) + path: $(checkoutPath) + - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)" + displayName: Run Tests + - bash: .azure-pipelines/scripts/process-results.sh + condition: succeededOrFailed() + displayName: Process Results + - bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)" + condition: eq(variables.haveCoverageData, 'true') + displayName: Aggregate Coverage Data + - task: PublishTestResults@2 + condition: eq(variables.haveTestResults, 'true') + inputs: + testResultsFiles: "$(outputPath)/junit/*.xml" + displayName: Publish Test Results + - task: PublishPipelineArtifact@1 + condition: eq(variables.haveBotResults, 'true') + displayName: Publish Bot Results + inputs: + targetPath: "$(outputPath)/bot/" + artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)" + - task: PublishPipelineArtifact@1 + condition: eq(variables.haveCoverageData, 'true') + displayName: Publish Coverage Data + inputs: + targetPath: "$(Agent.TempDirectory)/coverage/" + artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)" diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 0000000000..cd4bdfee65 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,9 @@ +# Copyright (c) Ansible Project +# GNU 
General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# YAML reformatting +d032de3b16eed11ea3a31cd3d96d78f7c46a2ee0 +e8f965fbf8154ea177c6622da149f2ae8533bd3c +e938ca5f20651abc160ee6aba10014013d04dcc1 +eaa5e07b2866e05b6c7b5628ca92e9cb1142d008 diff --git a/.github/BOTMETA.yml b/.github/BOTMETA.yml index 7d014c9e96..d9d291f3b1 100644 --- a/.github/BOTMETA.yml +++ b/.github/BOTMETA.yml @@ -33,6 +33,8 @@ files: maintainers: $team_ansible_core $becomes/pmrun.py: maintainers: $team_ansible_core + $becomes/run0.py: + maintainers: konstruktoid $becomes/sesu.py: maintainers: nekonyuu $becomes/sudosu.py: @@ -50,6 +52,8 @@ files: $callbacks/cgroup_memory_recap.py: {} $callbacks/context_demo.py: {} $callbacks/counter_enabled.py: {} + $callbacks/default_without_diff.py: + maintainers: felixfontein $callbacks/dense.py: maintainers: dagwieers $callbacks/diy.py: @@ -57,7 +61,6 @@ files: $callbacks/elastic.py: keywords: apm observability maintainers: v1v - $callbacks/hipchat.py: {} $callbacks/jabber.py: {} $callbacks/log_plays.py: {} $callbacks/loganalytics.py: @@ -74,6 +77,8 @@ files: $callbacks/opentelemetry.py: keywords: opentelemetry observability maintainers: v1v + $callbacks/print_task.py: + maintainers: demonpig $callbacks/say.py: keywords: brew cask darwin homebrew macosx macports osx labels: macos say @@ -87,16 +92,22 @@ files: maintainers: ryancurrah $callbacks/syslog_json.py: maintainers: imjoseangel + $callbacks/tasks_only.py: + maintainers: felixfontein + $callbacks/timestamp.py: + maintainers: kurokobo $callbacks/unixy.py: labels: unixy maintainers: akatch - $callbacks/yaml.py: {} $connections/: labels: connections $connections/chroot.py: {} $connections/funcd.py: maintainers: mscherer $connections/iocage.py: {} + $connections/incus.py: + labels: incus + maintainers: stgraber $connections/jail.py: maintainers: $team_ansible_core $connections/lxc.py: {} @@ -108,10 
+119,14 @@ files: $connections/saltstack.py: labels: saltstack maintainers: mscherer + $connections/wsl.py: + maintainers: rgl $connections/zone.py: maintainers: $team_ansible_core $doc_fragments/: labels: docs_fragments + $doc_fragments/django.py: + maintainers: russoz $doc_fragments/hpe3par.py: labels: hpe3par maintainers: farhan7500 gautamphegde @@ -119,10 +134,14 @@ files: labels: hwc maintainers: $team_huawei $doc_fragments/nomad.py: - maintainers: chris93111 + maintainers: chris93111 apecnascimento + $doc_fragments/pipx.py: + maintainers: russoz $doc_fragments/xenserver.py: labels: xenserver maintainers: bvitnik + $filters/accumulate.py: + maintainers: VannTen $filters/counter.py: maintainers: keilr $filters/crc32.py: @@ -133,6 +152,8 @@ files: maintainers: giner $filters/from_csv.py: maintainers: Ajpantuso + $filters/from_ini.py: + maintainers: sscheib $filters/groupby_as_dict.py: maintainers: felixfontein $filters/hashids.py: @@ -143,28 +164,64 @@ files: maintainers: Ajpantuso $filters/jc.py: maintainers: kellyjonbrazil + $filters/json_diff.yml: + maintainers: numo68 + $filters/json_patch.py: + maintainers: numo68 + $filters/json_patch.yml: + maintainers: numo68 + $filters/json_patch_recipe.yml: + maintainers: numo68 $filters/json_query.py: {} + $filters/keep_keys.py: + maintainers: vbotka + $filters/lists.py: + maintainers: cfiehe + $filters/lists_difference.yml: + maintainers: cfiehe + $filters/lists_intersect.yml: + maintainers: cfiehe $filters/lists_mergeby.py: maintainers: vbotka + $filters/lists_symmetric_difference.yml: + maintainers: cfiehe + $filters/lists_union.yml: + maintainers: cfiehe $filters/random_mac.py: {} + $filters/remove_keys.py: + maintainers: vbotka + $filters/replace_keys.py: + maintainers: vbotka + $filters/reveal_ansible_type.py: + maintainers: vbotka $filters/time.py: maintainers: resmo $filters/to_days.yml: maintainers: resmo $filters/to_hours.yml: maintainers: resmo + $filters/to_ini.py: + maintainers: sscheib 
$filters/to_milliseconds.yml: maintainers: resmo $filters/to_minutes.yml: maintainers: resmo $filters/to_months.yml: maintainers: resmo + $filters/to_nice_yaml.yml: + maintainers: felixfontein + $filters/to_prettytable.py: + maintainers: tgadiev $filters/to_seconds.yml: maintainers: resmo $filters/to_time_unit.yml: maintainers: resmo $filters/to_weeks.yml: maintainers: resmo + $filters/to_yaml.py: + maintainers: felixfontein + $filters/to_yaml.yml: + maintainers: felixfontein $filters/to_years.yml: maintainers: resmo $filters/unicode_normalize.py: @@ -177,6 +234,8 @@ files: maintainers: opoplawski $inventories/gitlab_runners.py: maintainers: morph027 + $inventories/iocage.py: + maintainers: vbotka $inventories/icinga2.py: maintainers: BongoEADGC6 $inventories/linode.py: @@ -192,18 +251,18 @@ files: keywords: opennebula dynamic inventory script labels: cloud opennebula maintainers: feldsam - $inventories/proxmox.py: - maintainers: $team_virt ilijamt $inventories/scaleway.py: labels: cloud scaleway maintainers: $team_scaleway - $inventories/stackpath_compute.py: - maintainers: shayrybak $inventories/virtualbox.py: {} $inventories/xen_orchestra.py: maintainers: ddelnano shinuza $lookups/: labels: lookups + $lookups/binary_file.py: + maintainers: felixfontein + $lookups/bitwarden_secrets_manager.py: + maintainers: jantari $lookups/bitwarden.py: maintainers: lungj $lookups/cartesian.py: {} @@ -232,22 +291,27 @@ files: $lookups/filetree.py: maintainers: dagwieers $lookups/flattened.py: {} + $lookups/github_app_access_token.py: + maintainers: weisheng-p blavoie $lookups/hiera.py: maintainers: jparrill $lookups/keyring.py: {} $lookups/lastpass.py: {} $lookups/lmdb_kv.py: maintainers: jpmens - $lookups/manifold.py: - labels: manifold - maintainers: galanoff + $lookups/merge_variables.py: + maintainers: rlenferink m-a-r-k-e alpex8 $lookups/onepass: labels: onepassword maintainers: samdoran $lookups/onepassword.py: - maintainers: azenk scottsb + ignore: scottsb + maintainers: 
azenk $lookups/onepassword_raw.py: - maintainers: azenk scottsb + ignore: scottsb + maintainers: azenk + $lookups/onepassword_ssh_key.py: + maintainers: mohammedbabelly20 $lookups/passwordstore.py: {} $lookups/random_pet.py: maintainers: Akasurde @@ -265,11 +329,23 @@ files: maintainers: delineaKrehl tylerezimmerman $module_utils/: labels: module_utils + $module_utils/android_sdkmanager.py: + maintainers: shamilovstas + $module_utils/btrfs.py: + maintainers: gnfzdz + $module_utils/cmd_runner_fmt.py: + maintainers: russoz + $module_utils/cmd_runner.py: + maintainers: russoz $module_utils/deps.py: maintainers: russoz + $module_utils/django.py: + maintainers: russoz $module_utils/gconftool2.py: labels: gconftool2 maintainers: russoz + $module_utils/gio_mime.py: + maintainers: russoz $module_utils/gitlab.py: keywords: gitlab source_control labels: gitlab @@ -294,7 +370,6 @@ files: maintainers: $team_manageiq $module_utils/memset.py: labels: cloud memset - maintainers: glitchcrab $module_utils/mh/: labels: module_helper maintainers: russoz @@ -306,15 +381,18 @@ files: $module_utils/oracle/oci_utils.py: labels: cloud maintainers: $team_oracle + $module_utils/pacemaker.py: + maintainers: munchtoast $module_utils/pipx.py: labels: pipx maintainers: russoz + $module_utils/pkg_req.py: + maintainers: russoz + $module_utils/python_runner.py: + maintainers: russoz $module_utils/puppet.py: labels: puppet maintainers: russoz - $module_utils/pure.py: - labels: pure pure_storage - maintainers: $team_purestorage $module_utils/redfish_utils.py: labels: redfish_utils maintainers: $team_redfish @@ -323,16 +401,26 @@ files: $module_utils/scaleway.py: labels: cloud scaleway maintainers: $team_scaleway + $module_utils/snap.py: + labels: snap + maintainers: russoz $module_utils/ssh.py: maintainers: russoz + $module_utils/systemd.py: + maintainers: NomakCooper $module_utils/storage/hpe3par/hpe3par.py: maintainers: farhan7500 gautamphegde $module_utils/utm_utils.py: labels: utm_utils 
maintainers: $team_e_spirit + $module_utils/vardict.py: + labels: vardict + maintainers: russoz $module_utils/wdc_redfish_utils.py: labels: wdc_redfish_utils maintainers: $team_wdc + $module_utils/xdg_mime.py: + maintainers: mhalano $module_utils/xenserver.py: labels: xenserver maintainers: bvitnik @@ -359,6 +447,8 @@ files: ignore: DavidWittman jiuka labels: alternatives maintainers: mulby + $modules/android_sdk.py: + maintainers: shamilovstas $modules/ansible_galaxy_install.py: maintainers: russoz $modules/apache2_mod_proxy.py: @@ -386,14 +476,16 @@ files: keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool labels: beadm solaris maintainers: $team_solaris - $modules/bearychat.py: - maintainers: tonyseek $modules/bigpanda.py: - maintainers: hkariti + ignore: hkariti $modules/bitbucket_: maintainers: catcombo + $modules/bootc_manage.py: + maintainers: cooktheryan $modules/bower.py: maintainers: mwarkentin + $modules/btrfs_: + maintainers: gnfzdz $modules/bundler.py: maintainers: thoiberg $modules/bzr.py: @@ -410,8 +502,6 @@ files: maintainers: NickatEpic $modules/cisco_webex.py: maintainers: drew-russell - $modules/clc_: - maintainers: clc-runner $modules/cloud_init_data_facts.py: maintainers: resmo $modules/cloudflare_dns.py: @@ -423,7 +513,7 @@ files: ignore: resmo maintainers: dmtrs $modules/consul: - ignore: colin-nolan + ignore: colin-nolan Hakon maintainers: $team_consul $modules/copr.py: maintainers: schlupov @@ -440,9 +530,11 @@ files: labels: datadog_event maintainers: n0ts $modules/datadog_monitor.py: - maintainers: skornehl + ignore: skornehl $modules/dconf.py: maintainers: azaghal + $modules/decompress.py: + maintainers: shamilovstas $modules/deploy_helper.py: maintainers: ramondelafuente $modules/dimensiondata_network.py: @@ -452,12 +544,24 @@ files: maintainers: tintoy $modules/discord.py: maintainers: cwollinger + $modules/django_check.py: + maintainers: russoz + $modules/django_command.py: + 
maintainers: russoz + $modules/django_createcachetable.py: + maintainers: russoz + $modules/django_dumpdata.py: + maintainers: russoz + $modules/django_loaddata.py: + maintainers: russoz $modules/django_manage.py: ignore: scottanderson42 tastychutney labels: django_manage maintainers: russoz $modules/dnf_versionlock.py: maintainers: moreda + $modules/dnf_config_manager.py: + maintainers: ahyattdev $modules/dnsimple.py: maintainers: drcapulet $modules/dnsimple_info.py: @@ -477,9 +581,9 @@ files: $modules/etcd3.py: ignore: vfauth maintainers: evrardjp - $modules/facter.py: + $modules/facter_facts.py: labels: facter - maintainers: $team_ansible_core gamethis + maintainers: russoz $team_ansible_core gamethis $modules/filesize.py: maintainers: quidame $modules/filesystem.py: @@ -489,8 +593,6 @@ files: maintainers: $team_flatpak $modules/flatpak_remote.py: maintainers: $team_flatpak - $modules/flowdock.py: - ignore: mcodd $modules/gandi_livedns.py: maintainers: gthiemonge $modules/gconftool2.py: @@ -502,8 +604,12 @@ files: $modules/gem.py: labels: gem maintainers: $team_ansible_core johanwiren + $modules/gio_mime.py: + maintainers: russoz $modules/git_config.py: maintainers: djmattyg007 mgedmin + $modules/git_config_info.py: + maintainers: guenhter $modules/github_: maintainers: stpierre $modules/github_deploy_key.py: @@ -522,28 +628,39 @@ files: keywords: gitlab source_control maintainers: $team_gitlab notify: jlozadad + ignore: dj-wasabi $modules/gitlab_branch.py: maintainers: paytroff + $modules/gitlab_issue.py: + maintainers: zvaraondrej + $modules/gitlab_label.py: + maintainers: gpongelli + $modules/gitlab_merge_request.py: + maintainers: zvaraondrej + $modules/gitlab_milestone.py: + maintainers: gpongelli $modules/gitlab_project_variable.py: maintainers: markuman + $modules/gitlab_instance_variable.py: + maintainers: benibr $modules/gitlab_runner.py: maintainers: SamyCoenen $modules/gitlab_user.py: maintainers: LennertMertens stgrace + 
$modules/gitlab_group_access_token.py: + maintainers: pixslx + $modules/gitlab_project_access_token.py: + maintainers: pixslx $modules/grove.py: maintainers: zimbatm $modules/gunicorn.py: maintainers: agmezr - $modules/hana_query.py: - maintainers: rainerleber $modules/haproxy.py: maintainers: ravibhure Normo $modules/heroku_collaborator.py: maintainers: marns93 $modules/hg.py: maintainers: yeukhon - $modules/hipchat.py: - maintainers: pb8226 shirou $modules/homebrew.py: ignore: ryansb keywords: brew cask darwin homebrew macosx macports osx @@ -562,6 +679,11 @@ files: labels: homebrew_ macos maintainers: $team_macos notify: chris-short + $modules/homebrew_services.py: + ignore: ryansb + keywords: brew cask services darwin homebrew macosx macports osx + labels: homebrew_ macos + maintainers: $team_macos kitizz $modules/homectl.py: maintainers: jameslivulpi $modules/honeybadger_deployment.py: @@ -588,7 +710,7 @@ files: ignore: jose-delarosa maintainers: $team_redfish $modules/ilo_: - ignore: jose-delarosa + ignore: jose-delarosa varini-hp maintainers: $team_redfish $modules/imc_rest.py: labels: cisco @@ -620,6 +742,13 @@ files: maintainers: bregman-arie $modules/ipa_: maintainers: $team_ipa + ignore: fxfitz + $modules/ipa_getkeytab.py: + maintainers: abakanovskii + $modules/ipa_dnsrecord.py: + maintainers: $team_ipa jwbernin + $modules/ipbase_info.py: + maintainers: dominikkukacka $modules/ipa_pwpolicy.py: maintainers: adralioh $modules/ipa_service.py: @@ -654,33 +783,55 @@ files: labels: jboss maintainers: $team_jboss jhoekx $modules/jenkins_build.py: - maintainers: brettmilford unnecessary-username + maintainers: brettmilford unnecessary-username juanmcasanova + $modules/jenkins_build_info.py: + maintainers: juanmcasanova + $modules/jenkins_credential.py: + maintainers: YoussefKhalidAli $modules/jenkins_job.py: maintainers: sermilrod $modules/jenkins_job_info.py: maintainers: stpierre + $modules/jenkins_node.py: + maintainers: phyrwork $modules/jenkins_plugin.py: 
maintainers: jtyr $modules/jenkins_script.py: maintainers: hogarthj $modules/jira.py: - ignore: DWSR + ignore: DWSR tarka labels: jira - maintainers: Slezhuk tarka pertoft + maintainers: Slezhuk pertoft + $modules/kdeconfig.py: + maintainers: smeso $modules/kernel_blacklist.py: maintainers: matze $modules/keycloak_: maintainers: $team_keycloak $modules/keycloak_authentication.py: maintainers: elfelip Gaetan2907 + $modules/keycloak_authentication_required_actions.py: + maintainers: Skrekulko + $modules/keycloak_authz_authorization_scope.py: + maintainers: mattock + $modules/keycloak_authz_permission.py: + maintainers: mattock + $modules/keycloak_authz_custom_policy.py: + maintainers: mattock + $modules/keycloak_authz_permission_info.py: + maintainers: mattock $modules/keycloak_client_rolemapping.py: maintainers: Gaetan2907 $modules/keycloak_clientscope.py: maintainers: Gaetan2907 + $modules/keycloak_clientscope_type.py: + maintainers: simonpahl $modules/keycloak_clientsecret_info.py: maintainers: fynncfchen johncant $modules/keycloak_clientsecret_regenerate.py: maintainers: fynncfchen johncant + $modules/keycloak_component.py: + maintainers: fivetide $modules/keycloak_group.py: maintainers: adamgoossens $modules/keycloak_identity_provider.py: @@ -689,18 +840,32 @@ files: maintainers: kris2kris $modules/keycloak_realm_info.py: maintainers: fynncfchen + $modules/keycloak_realm_key.py: + maintainers: mattock $modules/keycloak_role.py: maintainers: laurpaum + $modules/keycloak_user.py: + maintainers: elfelip $modules/keycloak_user_federation.py: maintainers: laurpaum + $modules/keycloak_userprofile.py: + maintainers: yeoldegrove + $modules/keycloak_component_info.py: + maintainers: desand01 + $modules/keycloak_client_rolescope.py: + maintainers: desand01 $modules/keycloak_user_rolemapping.py: maintainers: bratwurzt + $modules/keycloak_realm_rolemapping.py: + maintainers: agross mhuysamen Gaetan2907 $modules/keyring.py: maintainers: ahussey-redhat 
$modules/keyring_info.py: maintainers: ahussey-redhat $modules/kibana_plugin.py: maintainers: barryib + $modules/krb_ticket.py: + maintainers: abakanovskii $modules/launchd.py: maintainers: martinm82 $modules/layman.py: @@ -711,6 +876,8 @@ files: maintainers: drybjed jtyr noles $modules/ldap_entry.py: maintainers: jtyr + $modules/ldap_inc.py: + maintainers: pduveau $modules/ldap_passwd.py: maintainers: KellerFuchs jtyr $modules/ldap_search.py: @@ -737,6 +904,12 @@ files: maintainers: nerzhul $modules/lvg.py: maintainers: abulimov + $modules/lvm_pv.py: + maintainers: klention + $modules/lvm_pv_move_data.py: + maintainers: klention + $modules/lvg_rename.py: + maintainers: lszomor $modules/lvol.py: maintainers: abulimov jhoekx zigaSRC unkaputtbar112 $modules/lxc_container.py: @@ -785,7 +958,7 @@ files: labels: maven_artifact maintainers: tumbl3w33d turb $modules/memset_: - maintainers: glitchcrab + ignore: glitchcrab $modules/mksysb.py: labels: aix mksysb maintainers: $team_aix @@ -821,7 +994,7 @@ files: $modules/nmcli.py: maintainers: alcamie101 $modules/nomad_: - maintainers: chris93111 + maintainers: chris93111 apecnascimento $modules/nosh.py: maintainers: tacatac $modules/npm.py: @@ -850,6 +1023,8 @@ files: maintainers: $team_opennebula $modules/one_host.py: maintainers: rvalle + $modules/one_vnet.py: + maintainers: abakanovskii $modules/oneandone_: maintainers: aajdinov edevenport $modules/onepassword_info.py: @@ -887,7 +1062,13 @@ files: $modules/ovh_monthly_billing.py: maintainers: fraff $modules/pacemaker_cluster.py: - maintainers: matbu + maintainers: matbu munchtoast + $modules/pacemaker_info.py: + maintainers: munchtoast + $modules/pacemaker_resource.py: + maintainers: munchtoast + $modules/pacemaker_stonith.py: + maintainers: munchtoast $modules/packet_: maintainers: nurfet-becirevic t0mk $modules/packet_device.py: @@ -906,7 +1087,7 @@ files: labels: pagerduty maintainers: suprememoocow thaumos $modules/pagerduty_alert.py: - maintainers: ApsOps + 
maintainers: ApsOps xshen1 $modules/pagerduty_change.py: maintainers: adamvaughan $modules/pagerduty_user.py: @@ -918,7 +1099,7 @@ files: $modules/pamd.py: maintainers: kevensen $modules/parted.py: - maintainers: ColOfAbRiX rosowiecki jake2184 + maintainers: ColOfAbRiX jake2184 $modules/pear.py: ignore: jle64 labels: pear @@ -949,6 +1130,9 @@ files: maintainers: $team_solaris dermute $modules/pmem.py: maintainers: mizumm + $modules/pnpm.py: + ignore: chrishoffman + maintainers: aretrosen $modules/portage.py: ignore: sayap labels: portage @@ -960,34 +1144,13 @@ files: maintainers: $team_bsd berenddeboer $modules/pritunl_: maintainers: Lowess - $modules/profitbricks: - maintainers: baldwinSPC - $modules/proxmox: - keywords: kvm libvirt proxmox qemu - labels: proxmox virt - maintainers: $team_virt - $modules/proxmox.py: - ignore: skvidal - maintainers: UnderGreen - $modules/proxmox_disk.py: - maintainers: castorsky - $modules/proxmox_kvm.py: - ignore: skvidal - maintainers: helldorado - $modules/proxmox_nic.py: - maintainers: Kogelvis - $modules/proxmox_tasks_info: - maintainers: paginabianca - $modules/proxmox_template.py: - ignore: skvidal - maintainers: UnderGreen $modules/pubnub_blocks.py: maintainers: parfeon pubnub $modules/pulp_repo.py: maintainers: sysadmind $modules/puppet.py: labels: puppet - maintainers: nibalizer emonty + maintainers: emonty $modules/pushbullet.py: maintainers: willybarro $modules/pushover.py: @@ -995,54 +1158,15 @@ files: $modules/python_requirements_info.py: ignore: ryansb maintainers: willthames - $modules/rax: - ignore: ryansb sivel - $modules/rax.py: - maintainers: omgjlk sivel - $modules/rax_cbs.py: - maintainers: claco - $modules/rax_cbs_attachments.py: - maintainers: claco - $modules/rax_cdb.py: - maintainers: jails - $modules/rax_cdb_database.py: - maintainers: jails - $modules/rax_cdb_user.py: - maintainers: jails - $modules/rax_clb.py: - maintainers: claco - $modules/rax_clb_nodes.py: - maintainers: neuroid - 
$modules/rax_clb_ssl.py: - maintainers: smashwilson - $modules/rax_files.py: - maintainers: angstwad - $modules/rax_files_objects.py: - maintainers: angstwad - $modules/rax_identity.py: - maintainers: claco - $modules/rax_mon_alarm.py: - maintainers: smashwilson - $modules/rax_mon_check.py: - maintainers: smashwilson - $modules/rax_mon_entity.py: - maintainers: smashwilson - $modules/rax_mon_notification.py: - maintainers: smashwilson - $modules/rax_mon_notification_plan.py: - maintainers: smashwilson - $modules/rax_network.py: - maintainers: claco omgjlk - $modules/rax_queue.py: - maintainers: claco $modules/read_csv.py: maintainers: dagwieers $modules/redfish_: ignore: jose-delarosa - maintainers: $team_redfish + maintainers: $team_redfish TSKushal $modules/redhat_subscription.py: labels: redhat_subscription - maintainers: barnabycourt alikins kahowell + maintainers: $team_rhsm + ignore: barnabycourt alikins kahowell $modules/redis.py: maintainers: slok $modules/redis_data.py: @@ -1058,16 +1182,10 @@ files: keywords: kvm libvirt proxmox qemu labels: rhevm virt maintainers: $team_virt TimothyVandenbrande - $modules/rhn_channel.py: - labels: rhn_channel - maintainers: vincentvdk alikins $team_rhn - $modules/rhn_register.py: - labels: rhn_register - maintainers: jlaska $team_rhn $modules/rhsm_release.py: - maintainers: seandst + maintainers: seandst $team_rhsm $modules/rhsm_repository.py: - maintainers: giovannisciortino + maintainers: giovannisciortino $team_rhsm $modules/riak.py: maintainers: drewkerrigan jsmartin $modules/rocketchat.py: @@ -1088,10 +1206,6 @@ files: maintainers: nerzhul $modules/runit.py: maintainers: jsumners - $modules/sap_task_list_execute: - maintainers: rainerleber - $modules/sapcar_extract.py: - maintainers: RainerLeber $modules/say.py: maintainers: $team_ansible_core ignore: mpdehaan @@ -1100,9 +1214,9 @@ files: $modules/scaleway_compute_private_network.py: maintainers: pastral $modules/scaleway_container.py: - maintainers: Lunik + 
maintainers: Lunik $modules/scaleway_container_info.py: - maintainers: Lunik + maintainers: Lunik $modules/scaleway_container_namespace.py: maintainers: Lunik $modules/scaleway_container_namespace_info.py: @@ -1166,6 +1280,8 @@ files: ignore: ryansb $modules/shutdown.py: maintainers: nitzmahone samdoran aminvakil + $modules/simpleinit_msb.py: + maintainers: vaygr $modules/sl_vm.py: maintainers: mcltn $modules/slack.py: @@ -1178,7 +1294,7 @@ files: maintainers: $team_solaris $modules/snap.py: labels: snap - maintainers: angristan vcarceler + maintainers: angristan vcarceler russoz $modules/snap_alias.py: labels: snap maintainers: russoz @@ -1200,8 +1316,6 @@ files: maintainers: farhan7500 gautamphegde $modules/ssh_config.py: maintainers: gaqzi Akasurde - $modules/stackdriver.py: - maintainers: bwhaley $modules/stacki_host.py: labels: stacki_host maintainers: bsanders bbyhuy @@ -1231,6 +1345,12 @@ files: maintainers: precurse $modules/sysrc.py: maintainers: dlundgren + $modules/systemd_creds_decrypt.py: + maintainers: konstruktoid + $modules/systemd_creds_encrypt.py: + maintainers: konstruktoid + $modules/systemd_info.py: + maintainers: NomakCooper $modules/sysupgrade.py: maintainers: precurse $modules/taiga_issue.py: @@ -1256,20 +1376,25 @@ files: maintainers: nate-kingsley $modules/urpmi.py: maintainers: pmakowski + $modules/usb_facts.py: + maintainers: maxopoly $modules/utm_: keywords: sophos utm maintainers: $team_e_spirit $modules/utm_ca_host_key_cert.py: - maintainers: stearz + ignore: stearz + maintainers: $team_e_spirit $modules/utm_ca_host_key_cert_info.py: - maintainers: stearz + ignore: stearz + maintainers: $team_e_spirit $modules/utm_network_interface_address.py: maintainers: steamx $modules/utm_network_interface_address_info.py: maintainers: steamx $modules/utm_proxy_auth_profile.py: keywords: sophos utm - maintainers: $team_e_spirit stearz + ignore: stearz + maintainers: $team_e_spirit $modules/utm_proxy_exception.py: keywords: sophos utm maintainers: 
$team_e_spirit RickS-C137 @@ -1292,8 +1417,6 @@ files: maintainers: $team_wdc $modules/wdc_redfish_info.py: maintainers: $team_wdc - $modules/webfaction_: - maintainers: quentinsf $modules/xattr.py: labels: xattr maintainers: bcoca @@ -1301,6 +1424,8 @@ files: maintainers: dinoocch the-maldridge $modules/xcc_: maintainers: panyy3 renxulei + $modules/xdg_mime.py: + maintainers: mhalano $modules/xenserver_: maintainers: bvitnik $modules/xenserver_facts.py: @@ -1320,7 +1445,7 @@ files: labels: m:xml xml maintainers: dagwieers magnus919 tbielawa cmprescott sm4rk0 $modules/yarn.py: - maintainers: chrishoffman verkaufer + ignore: chrishoffman verkaufer $modules/yum_versionlock.py: maintainers: gyptazy aminvakil $modules/zfs: @@ -1333,6 +1458,8 @@ files: maintainers: natefoo $modules/znode.py: maintainers: treyperry + $modules/zpool.py: + maintainers: tomhesse $modules/zpool_facts: keywords: beadm dladm illumos ipadm nexenta omnios openindiana pfexec smartos solaris sunos zfs zpool labels: solaris @@ -1345,8 +1472,90 @@ files: ignore: matze labels: zypper maintainers: $team_suse + $plugin_utils/ansible_type.py: + maintainers: vbotka + $modules/zypper_repository_info.py: + labels: zypper + maintainers: $team_suse TobiasZeuch181 + $plugin_utils/keys_filter.py: + maintainers: vbotka + $plugin_utils/unsafe.py: + maintainers: felixfontein $tests/a_module.py: maintainers: felixfontein + $tests/ansible_type.py: + maintainers: vbotka + $tests/fqdn_valid.py: + maintainers: vbotka +######################### + docs/docsite/rst/filter_guide.rst: {} + docs/docsite/rst/filter_guide_abstract_informations.rst: {} + docs/docsite/rst/filter_guide_abstract_informations_counting_elements_in_sequence.rst: + maintainers: keilr + docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst: + maintainers: felixfontein giner + docs/docsite/rst/filter_guide_abstract_informations_grouping.rst: + maintainers: felixfontein + 
docs/docsite/rst/filter_guide_abstract_informations_lists_helper.rst: + maintainers: cfiehe + docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst: + maintainers: vbotka + docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst: + maintainers: vbotka + docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst: + maintainers: vbotka + docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst: + maintainers: vbotka + docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst: + maintainers: vbotka + docs/docsite/rst/filter_guide_conversions.rst: + maintainers: Ajpantuso kellyjonbrazil + docs/docsite/rst/filter_guide_creating_identifiers.rst: + maintainers: Ajpantuso + docs/docsite/rst/filter_guide_paths.rst: {} + docs/docsite/rst/filter_guide_selecting_json_data.rst: {} + docs/docsite/rst/filter_guide_working_with_times.rst: + maintainers: resmo + docs/docsite/rst/filter_guide_working_with_unicode.rst: + maintainers: Ajpantuso + docs/docsite/rst/filter_guide_working_with_versions.rst: + maintainers: ericzolf + docs/docsite/rst/guide_alicloud.rst: + maintainers: xiaozhu36 + docs/docsite/rst/guide_cmdrunner.rst: + maintainers: russoz + docs/docsite/rst/guide_deps.rst: + maintainers: russoz + docs/docsite/rst/guide_iocage.rst: + maintainers: russoz felixfontein + docs/docsite/rst/guide_iocage_inventory.rst: + maintainers: vbotka + docs/docsite/rst/guide_iocage_inventory_aliases.rst: + maintainers: vbotka + docs/docsite/rst/guide_iocage_inventory_basics.rst: + maintainers: vbotka + docs/docsite/rst/guide_iocage_inventory_dhcp.rst: + maintainers: vbotka + docs/docsite/rst/guide_iocage_inventory_hooks.rst: + maintainers: vbotka + docs/docsite/rst/guide_iocage_inventory_properties.rst: + maintainers: vbotka + docs/docsite/rst/guide_iocage_inventory_tags.rst: + maintainers: vbotka + docs/docsite/rst/guide_modulehelper.rst: + 
maintainers: russoz + docs/docsite/rst/guide_online.rst: + maintainers: remyleone + docs/docsite/rst/guide_packet.rst: + maintainers: baldwinSPC nurfet-becirevic t0mk teebes + docs/docsite/rst/guide_scaleway.rst: + maintainers: $team_scaleway + docs/docsite/rst/guide_uthelper.rst: + maintainers: russoz + docs/docsite/rst/guide_vardict.rst: + maintainers: russoz + docs/docsite/rst/test_guide.rst: + maintainers: felixfontein ######################### tests/: labels: tests @@ -1364,7 +1573,6 @@ macros: becomes: plugins/become caches: plugins/cache callbacks: plugins/callback - cliconfs: plugins/cliconf connections: plugins/connection doc_fragments: plugins/doc_fragments filters: plugins/filter @@ -1372,32 +1580,31 @@ macros: lookups: plugins/lookup module_utils: plugins/module_utils modules: plugins/modules - terminals: plugins/terminal + plugin_utils: plugins/plugin_utils tests: plugins/test team_ansible_core: team_aix: MorrisA bcoca d-little flynn1973 gforster kairoaraujo marvin-sinister mator molekuul ramooncamacho wtcross team_bsd: JoergFiedler MacLemon bcoca dch jasperla mekanix opoplawski overhacked tuxillo - team_consul: sgargan + team_consul: sgargan apollo13 Ilgmi team_cyberark_conjur: jvanderhoof ryanprior team_e_spirit: MatrixCrawler getjack team_flatpak: JayKayy oolongbrothers - team_gitlab: Lunik Shaps dj-wasabi marwatk waheedi zanssa scodeman metanovii sh0shin nejch lgatellier suukit + team_gitlab: Lunik Shaps marwatk waheedi zanssa scodeman metanovii sh0shin nejch lgatellier suukit team_hpux: bcoca davx8342 team_huawei: QijunPan TommyLike edisonxiang freesky-edward hwDCN niuzhenguo xuxiaowei0512 yanzhangi zengchen1024 zhongjun2 - team_ipa: Akasurde Nosmoht fxfitz justchris1 + team_ipa: Akasurde Nosmoht justchris1 team_jboss: Wolfant jairojunior wbrefvem - team_keycloak: eikef ndclt + team_keycloak: eikef ndclt mattock thomasbach-dev team_linode: InTheCloudDan decentral1se displague rmcintosh Charliekenney23 LBGarber team_macos: Akasurde kyleabenson 
martinm82 danieljaouen indrajitr team_manageiq: abellotti cben gtanzillo yaacov zgalor dkorn evertmulder team_networking: NilashishC Qalthos danielmellado ganeshrn justjais trishnaguha sganesh-infoblox privateip team_opennebula: ilicmilan meerkampdvv rsmontero xorel nilsding team_oracle: manojmeda mross22 nalsaber - team_purestorage: bannaych dnix101 genegr lionmax opslounge raekins sdodsley sile16 - team_redfish: mraineri tomasg2012 xmadsen renxulei rajeevkallur bhavya06 - team_rhn: FlossWare alikins barnabycourt vritant + team_redfish: mraineri tomasg2012 xmadsen renxulei rajeevkallur bhavya06 jyundt + team_rhsm: cnsnyder ptoscano team_scaleway: remyleone abarbare team_solaris: bcoca fishman jasperla jpdasma mator scathatheworm troy2914 xen0l - team_suse: commel evrardjp lrupp toabctl AnderEnder alxgu andytom sealor - team_virt: joshainglis karmab tleguern Thulium-Drake Ajpantuso + team_suse: commel evrardjp lrupp AnderEnder alxgu andytom sealor + team_virt: joshainglis karmab Thulium-Drake Ajpantuso team_wdc: mikemoerk diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index bd5030f2c2..4b1c1bfb95 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -7,147 +7,147 @@ name: Bug report description: Create a report to help us improve body: -- type: markdown - attributes: - value: | - ⚠ - Verify first that your issue is not [already reported on GitHub][issue search]. - Also test if the latest release and devel branch are affected too. - *Complete **all** sections as described, this form is processed automatically.* + - type: markdown + attributes: + value: | + ⚠ + Verify first that your issue is not [already reported on GitHub][issue search]. + Also test if the latest release and devel branch are affected too. 
+ *Complete **all** sections as described, this form is processed automatically.* - [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues + [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues -- type: textarea - attributes: - label: Summary - description: Explain the problem briefly below. - placeholder: >- - When I try to do X with the collection from the main branch on GitHub, Y - breaks in a way Z under the env E. Here are all the details I know - about this problem... - validations: - required: true - -- type: dropdown - attributes: - label: Issue Type - # FIXME: Once GitHub allows defining the default choice, update this - options: - - Bug Report - validations: - required: true - -- type: textarea - attributes: - # For smaller collections we could use a multi-select and hardcode the list - # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins - # Select from list, filter as you type (`mysql` would only show the 3 mysql components) - # OR freeform - doesn't seem to be supported in adaptivecards - label: Component Name - description: >- - Write the short name of the module, plugin, task or feature below, - *use your best guess if unsure*. - placeholder: dnf, apt, yum, pip, user etc. - validations: - required: true - -- type: textarea - attributes: - label: Ansible Version - description: >- - Paste verbatim output from `ansible --version` between - tripple backticks. - value: | - ```console (paste below) - $ ansible --version - - ``` - validations: - required: true - -- type: textarea - attributes: - label: Community.general Version - description: >- - Paste verbatim output from "ansible-galaxy collection list community.general" - between tripple backticks. 
- value: | - ```console (paste below) - $ ansible-galaxy collection list community.general - - ``` - validations: - required: true - -- type: textarea - attributes: - label: Configuration - description: >- - If this issue has an example piece of YAML that can help to reproduce this problem, please provide it. - This can be a piece of YAML from, e.g., an automation, script, scene or configuration. - Paste verbatim output from `ansible-config dump --only-changed` between quotes - value: | - ```console (paste below) - $ ansible-config dump --only-changed - - ``` - - -- type: textarea - attributes: - label: OS / Environment - description: >- - Provide all relevant information below, e.g. target OS versions, - network device firmware, etc. - placeholder: RHEL 8, CentOS Stream etc. - validations: - required: false - - -- type: textarea - attributes: - label: Steps to Reproduce - description: | - Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also passed any playbooks, configs and commands you used. - - **HINT:** You can paste https://gist.github.com links for larger files. - value: | - - ```yaml (paste below) - - ``` - validations: - required: true - -- type: textarea - attributes: - label: Expected Results - description: >- - Describe what you expected to happen when running the steps above. - placeholder: >- - I expected X to happen because I assumed Y. - that it did not. - validations: - required: true - -- type: textarea - attributes: - label: Actual Results - description: | - Describe what actually happened. If possible run with extra verbosity (`-vvvv`). - - Paste verbatim command output between quotes. 
- value: | - ```console (paste below) - - ``` -- type: checkboxes - attributes: - label: Code of Conduct - description: | - Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. - options: - - label: I agree to follow the Ansible Code of Conduct + - type: textarea + attributes: + label: Summary + description: Explain the problem briefly below. + placeholder: >- + When I try to do X with the collection from the main branch on GitHub, Y + breaks in a way Z under the env E. Here are all the details I know + about this problem... + validations: required: true + + - type: dropdown + attributes: + label: Issue Type + # FIXME: Once GitHub allows defining the default choice, update this + options: + - Bug Report + validations: + required: true + + - type: textarea + attributes: + # For smaller collections we could use a multi-select and hardcode the list + # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins + # Select from list, filter as you type (`mysql` would only show the 3 mysql components) + # OR freeform - doesn't seem to be supported in adaptivecards + label: Component Name + description: >- + Write the short name of the module, plugin, task or feature below, + *use your best guess if unsure*. Do not include `community.general.`! + placeholder: dnf, apt, yum, pip, user etc. + validations: + required: true + + - type: textarea + attributes: + label: Ansible Version + description: >- + Paste verbatim output from `ansible --version` between + tripple backticks. + value: | + ```console (paste below) + $ ansible --version + + ``` + validations: + required: true + + - type: textarea + attributes: + label: Community.general Version + description: >- + Paste verbatim output from "ansible-galaxy collection list community.general" + between tripple backticks. 
+ value: | + ```console (paste below) + $ ansible-galaxy collection list community.general + + ``` + validations: + required: true + + - type: textarea + attributes: + label: Configuration + description: >- + If this issue has an example piece of YAML that can help to reproduce this problem, please provide it. + This can be a piece of YAML from, e.g., an automation, script, scene or configuration. + Paste verbatim output from `ansible-config dump --only-changed` between quotes + value: | + ```console (paste below) + $ ansible-config dump --only-changed + + ``` + + + - type: textarea + attributes: + label: OS / Environment + description: >- + Provide all relevant information below, e.g. target OS versions, + network device firmware, etc. + placeholder: RHEL 8, CentOS Stream etc. + validations: + required: false + + + - type: textarea + attributes: + label: Steps to Reproduce + description: | + Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also passed any playbooks, configs and commands you used. + + **HINT:** You can paste https://gist.github.com links for larger files. + value: | + + ```yaml (paste below) + + ``` + validations: + required: true + + - type: textarea + attributes: + label: Expected Results + description: >- + Describe what you expected to happen when running the steps above. + placeholder: >- + I expected X to happen because I assumed Y. + that it did not. + validations: + required: true + + - type: textarea + attributes: + label: Actual Results + description: | + Describe what actually happened. If possible run with extra verbosity (`-vvvv`). + + Paste verbatim command output between quotes. 
+ value: | + ```console (paste below) + + ``` + - type: checkboxes + attributes: + label: Code of Conduct + description: | + Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. + options: + - label: I agree to follow the Ansible Code of Conduct + required: true ... diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 0cc2db058c..476eed516e 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -6,26 +6,26 @@ # Ref: https://help.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository#configuring-the-template-chooser blank_issues_enabled: false # default: true contact_links: -- name: Security bug report - url: https://docs.ansible.com/ansible-core/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections - about: | - Please learn how to report security vulnerabilities here. + - name: Security bug report + url: https://docs.ansible.com/ansible-core/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections + about: | + Please learn how to report security vulnerabilities here. - For all security related bugs, email security@ansible.com - instead of using this issue tracker and you will receive - a prompt response. + For all security related bugs, email security@ansible.com + instead of using this issue tracker and you will receive + a prompt response. - For more information, see - https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html -- name: Ansible Code of Conduct - url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections - about: Be nice to other members of the community. 
-- name: Talks to the community - url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information - about: Please ask and answer usage questions here -- name: Working groups - url: https://github.com/ansible/community/wiki - about: Interested in improving a specific area? Become a part of a working group! -- name: For Enterprise - url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser_ansible_collections - about: Red Hat offers support for the Ansible Automation Platform + For more information, see + https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html + - name: Ansible Code of Conduct + url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections + about: Be nice to other members of the community. + - name: Talks to the community + url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information + about: Please ask and answer usage questions here + - name: Working groups + url: https://github.com/ansible/community/wiki + about: Interested in improving a specific area? Become a part of a working group! 
+ - name: For Enterprise + url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser_ansible_collections + about: Red Hat offers support for the Ansible Automation Platform diff --git a/.github/ISSUE_TEMPLATE/documentation_report.yml b/.github/ISSUE_TEMPLATE/documentation_report.yml index 3a2777f207..2ad4bce44a 100644 --- a/.github/ISSUE_TEMPLATE/documentation_report.yml +++ b/.github/ISSUE_TEMPLATE/documentation_report.yml @@ -8,122 +8,122 @@ description: Ask us about docs # NOTE: issue body is enabled to allow screenshots body: -- type: markdown - attributes: - value: | - ⚠ - Verify first that your issue is not [already reported on GitHub][issue search]. - Also test if the latest release and devel branch are affected too. - *Complete **all** sections as described, this form is processed automatically.* + - type: markdown + attributes: + value: | + ⚠ + Verify first that your issue is not [already reported on GitHub][issue search]. + Also test if the latest release and devel branch are affected too. + *Complete **all** sections as described, this form is processed automatically.* - [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues + [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues -- type: textarea - attributes: - label: Summary - description: | - Explain the problem briefly below, add suggestions to wording or structure. + - type: textarea + attributes: + label: Summary + description: | + Explain the problem briefly below, add suggestions to wording or structure. - **HINT:** Did you know the documentation has an `Edit on GitHub` link on every page? - placeholder: >- - I was reading the Collection documentation of version X and I'm having - problems understanding Y. It would be very helpful if that got - rephrased as Z. 
- validations: - required: true - -- type: dropdown - attributes: - label: Issue Type - # FIXME: Once GitHub allows defining the default choice, update this - options: - - Documentation Report - validations: - required: true - -- type: input - attributes: - label: Component Name - description: >- - Write the short name of the rst file, module, plugin, task or - feature below, *use your best guess if unsure*. - placeholder: mysql_user - validations: - required: true - -- type: textarea - attributes: - label: Ansible Version - description: >- - Paste verbatim output from `ansible --version` between - tripple backticks. - value: | - ```console (paste below) - $ ansible --version - - ``` - validations: - required: false - -- type: textarea - attributes: - label: Community.general Version - description: >- - Paste verbatim output from "ansible-galaxy collection list community.general" - between tripple backticks. - value: | - ```console (paste below) - $ ansible-galaxy collection list community.general - - ``` - validations: - required: true - -- type: textarea - attributes: - label: Configuration - description: >- - Paste verbatim output from `ansible-config dump --only-changed` between quotes. - value: | - ```console (paste below) - $ ansible-config dump --only-changed - - ``` - validations: - required: false - -- type: textarea - attributes: - label: OS / Environment - description: >- - Provide all relevant information below, e.g. OS version, - browser, etc. - placeholder: Fedora 33, Firefox etc. - validations: - required: false - -- type: textarea - attributes: - label: Additional Information - description: | - Describe how this improves the documentation, e.g. before/after situation or screenshots. - - **Tip:** It's not possible to upload the screenshot via this field directly but you can use the last textarea in this form to attach them. - - **HINT:** You can paste https://gist.github.com links for larger files. 
- placeholder: >- - When the improvement is applied, it makes it more straightforward - to understand X. - validations: - required: false - -- type: checkboxes - attributes: - label: Code of Conduct - description: | - Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. - options: - - label: I agree to follow the Ansible Code of Conduct + **HINT:** Did you know the documentation has an `Edit on GitHub` link on every page? + placeholder: >- + I was reading the Collection documentation of version X and I'm having + problems understanding Y. It would be very helpful if that got + rephrased as Z. + validations: required: true + + - type: dropdown + attributes: + label: Issue Type + # FIXME: Once GitHub allows defining the default choice, update this + options: + - Documentation Report + validations: + required: true + + - type: input + attributes: + label: Component Name + description: >- + Write the short name of the file, module, plugin, task or feature below, + *use your best guess if unsure*. Do not include `community.general.`! + placeholder: mysql_user + validations: + required: true + + - type: textarea + attributes: + label: Ansible Version + description: >- + Paste verbatim output from `ansible --version` between + tripple backticks. + value: | + ```console (paste below) + $ ansible --version + + ``` + validations: + required: false + + - type: textarea + attributes: + label: Community.general Version + description: >- + Paste verbatim output from "ansible-galaxy collection list community.general" + between tripple backticks. + value: | + ```console (paste below) + $ ansible-galaxy collection list community.general + + ``` + validations: + required: true + + - type: textarea + attributes: + label: Configuration + description: >- + Paste verbatim output from `ansible-config dump --only-changed` between quotes. 
+ value: | + ```console (paste below) + $ ansible-config dump --only-changed + + ``` + validations: + required: false + + - type: textarea + attributes: + label: OS / Environment + description: >- + Provide all relevant information below, e.g. OS version, + browser, etc. + placeholder: Fedora 33, Firefox etc. + validations: + required: false + + - type: textarea + attributes: + label: Additional Information + description: | + Describe how this improves the documentation, e.g. before/after situation or screenshots. + + **Tip:** It's not possible to upload the screenshot via this field directly but you can use the last textarea in this form to attach them. + + **HINT:** You can paste https://gist.github.com links for larger files. + placeholder: >- + When the improvement is applied, it makes it more straightforward + to understand X. + validations: + required: false + + - type: checkboxes + attributes: + label: Code of Conduct + description: | + Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. + options: + - label: I agree to follow the Ansible Code of Conduct + required: true ... diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index 9630b67e12..dc62f94c5c 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -7,67 +7,67 @@ name: Feature request description: Suggest an idea for this project body: -- type: markdown - attributes: - value: | - ⚠ - Verify first that your issue is not [already reported on GitHub][issue search]. - Also test if the latest release and devel branch are affected too. - *Complete **all** sections as described, this form is processed automatically.* + - type: markdown + attributes: + value: | + ⚠ + Verify first that your issue is not [already reported on GitHub][issue search]. 
+ Also test if the latest release and devel branch are affected too. + *Complete **all** sections as described, this form is processed automatically.* - [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues + [issue search]: https://github.com/ansible-collections/community.general/search?q=is%3Aissue&type=issues -- type: textarea - attributes: - label: Summary - description: Describe the new feature/improvement briefly below. - placeholder: >- - I am trying to do X with the collection from the main branch on GitHub and - I think that implementing a feature Y would be very helpful for me and - every other user of community.general because of Z. - validations: - required: true - -- type: dropdown - attributes: - label: Issue Type - # FIXME: Once GitHub allows defining the default choice, update this - options: - - Feature Idea - validations: - required: true - -- type: input - attributes: - label: Component Name - description: >- - Write the short name of the module, plugin, task or feature below, - *use your best guess if unsure*. - placeholder: dnf, apt, yum, pip, user etc. - validations: - required: true - -- type: textarea - attributes: - label: Additional Information - description: | - Describe how the feature would be used, why it is needed and what it would solve. - - **HINT:** You can paste https://gist.github.com links for larger files. - value: | - - ```yaml (paste below) - - ``` - validations: - required: false -- type: checkboxes - attributes: - label: Code of Conduct - description: | - Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. - options: - - label: I agree to follow the Ansible Code of Conduct + - type: textarea + attributes: + label: Summary + description: Describe the new feature/improvement briefly below. 
+ placeholder: >- + I am trying to do X with the collection from the main branch on GitHub and + I think that implementing a feature Y would be very helpful for me and + every other user of community.general because of Z. + validations: required: true + + - type: dropdown + attributes: + label: Issue Type + # FIXME: Once GitHub allows defining the default choice, update this + options: + - Feature Idea + validations: + required: true + + - type: input + attributes: + label: Component Name + description: >- + Write the short name of the module or plugin, or which other part(s) of the collection this feature affects. + *use your best guess if unsure*. Do not include `community.general.`! + placeholder: dnf, apt, yum, pip, user etc. + validations: + required: true + + - type: textarea + attributes: + label: Additional Information + description: | + Describe how the feature would be used, why it is needed and what it would solve. + + **HINT:** You can paste https://gist.github.com links for larger files. + value: | + + ```yaml (paste below) + + ``` + validations: + required: false + - type: checkboxes + attributes: + label: Code of Conduct + description: | + Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. + options: + - label: I agree to follow the Ansible Code of Conduct + required: true ... 
diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 2f4ff900d8..f71b322d2a 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -9,3 +9,7 @@ updates: directory: "/" schedule: interval: "weekly" + groups: + ci: + patterns: + - "*" diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000000..29a2d2e36a --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,32 @@ +##### SUMMARY + + + + + + +##### ISSUE TYPE + +- Bugfix Pull Request +- Docs Pull Request +- Feature Pull Request +- New Module/Plugin Pull Request +- Refactoring Pull Request +- Test Pull Request + +##### COMPONENT NAME + + +##### ADDITIONAL INFORMATION + + + + +```paste below + +``` diff --git a/docs/docsite/helper/lists_mergeby/list3.out.j2 b/.github/pull_request_template.md.license similarity index 79% rename from docs/docsite/helper/lists_mergeby/list3.out.j2 rename to .github/pull_request_template.md.license index b51f6b8681..a1390a69ed 100644 --- a/docs/docsite/helper/lists_mergeby/list3.out.j2 +++ b/.github/pull_request_template.md.license @@ -1,7 +1,3 @@ -{# Copyright (c) Ansible Project GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) SPDX-License-Identifier: GPL-3.0-or-later -#} -list3: -{{ list3|to_nice_yaml(indent=0) }} diff --git a/.github/workflows/ansible-test.yml b/.github/workflows/ansible-test.yml new file mode 100644 index 0000000000..616c7a843c --- /dev/null +++ b/.github/workflows/ansible-test.yml @@ -0,0 +1,176 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +# For the comprehensive list of the inputs supported by the ansible-community/ansible-test-gh-action GitHub Action, see +# https://github.com/marketplace/actions/ansible-test + +name: EOL CI +"on": + # Run EOL CI 
against all pushes (direct commits, also merged PRs), Pull Requests + push: + branches: + - main + - stable-* + pull_request: + # Run EOL CI once per day (at 08:00 UTC) + schedule: + - cron: '0 8 * * *' + +concurrency: + # Make sure there is at most one active run per PR, but do not cancel any non-PR runs + group: ${{ github.workflow }}-${{ (github.head_ref && github.event.number) || github.run_id }} + cancel-in-progress: true + +jobs: + sanity: + name: EOL Sanity (Ⓐ${{ matrix.ansible }}) + strategy: + matrix: + ansible: + - '2.17' + runs-on: ubuntu-latest + steps: + - name: Perform sanity testing + uses: felixfontein/ansible-test-gh-action@main + with: + ansible-core-version: stable-${{ matrix.ansible }} + codecov-token: ${{ secrets.CODECOV_TOKEN }} + coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }} + pull-request-change-detection: 'true' + testing-type: sanity + pre-test-cmd: >- + git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git ../../community/internal_test_tools + + units: + runs-on: ubuntu-latest + name: EOL Units (Ⓐ${{ matrix.ansible }}+py${{ matrix.python }}) + strategy: + # As soon as the first unit test fails, cancel the others to free up the CI queue + fail-fast: true + matrix: + ansible: + - '' + python: + - '' + exclude: + - ansible: '' + include: + - ansible: '2.17' + python: '3.7' + - ansible: '2.17' + python: '3.10' + - ansible: '2.17' + python: '3.12' + + steps: + - name: >- + Perform unit testing against + Ansible version ${{ matrix.ansible }} + uses: felixfontein/ansible-test-gh-action@main + with: + ansible-core-version: stable-${{ matrix.ansible }} + codecov-token: ${{ secrets.CODECOV_TOKEN }} + coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }} + pre-test-cmd: >- + mkdir -p ../../ansible + ; + git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git ../../community/internal_test_tools + 
pull-request-change-detection: 'true' + target-python-version: ${{ matrix.python }} + testing-type: units + + integration: + runs-on: ubuntu-latest + name: EOL I (Ⓐ${{ matrix.ansible }}+${{ matrix.docker }}+py${{ matrix.python }}:${{ matrix.target }}) + strategy: + fail-fast: false + matrix: + ansible: + - '' + docker: + - '' + python: + - '' + target: + - '' + exclude: + - ansible: '' + include: + # 2.17 + - ansible: '2.17' + docker: fedora39 + python: '' + target: azp/posix/1/ + - ansible: '2.17' + docker: fedora39 + python: '' + target: azp/posix/2/ + - ansible: '2.17' + docker: fedora39 + python: '' + target: azp/posix/3/ + - ansible: '2.17' + docker: ubuntu2004 + python: '' + target: azp/posix/1/ + - ansible: '2.17' + docker: ubuntu2004 + python: '' + target: azp/posix/2/ + - ansible: '2.17' + docker: ubuntu2004 + python: '' + target: azp/posix/3/ + - ansible: '2.17' + docker: alpine319 + python: '' + target: azp/posix/1/ + - ansible: '2.17' + docker: alpine319 + python: '' + target: azp/posix/2/ + - ansible: '2.17' + docker: alpine319 + python: '' + target: azp/posix/3/ + # Right now all generic tests are disabled. Uncomment when at least one of them is re-enabled. + # - ansible: '2.17' + # docker: default + # python: '3.7' + # target: azp/generic/1/ + # - ansible: '2.17' + # docker: default + # python: '3.12' + # target: azp/generic/1/ + + steps: + - name: >- + Perform integration testing against + Ansible version ${{ matrix.ansible }} + under Python ${{ matrix.python }} + uses: felixfontein/ansible-test-gh-action@main + with: + ansible-core-version: stable-${{ matrix.ansible }} + codecov-token: ${{ secrets.CODECOV_TOKEN }} + coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }} + docker-image: ${{ matrix.docker }} + integration-continue-on-error: 'false' + integration-diff: 'false' + integration-retry-on-error: 'true' + # TODO: remove "--branch stable-2" from community.crypto install once we're only using ansible-core 2.17 or newer! 
+ pre-test-cmd: >- + mkdir -p ../../ansible + ; + git clone --depth=1 --single-branch https://github.com/ansible-collections/ansible.posix.git ../../ansible/posix + ; + git clone --depth=1 --single-branch --branch stable-2 https://github.com/ansible-collections/community.crypto.git ../../community/crypto + ; + git clone --depth=1 --single-branch https://github.com/ansible-collections/community.docker.git ../../community/docker + ; + git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git ../../community/internal_test_tools + pull-request-change-detection: 'true' + target: ${{ matrix.target }} + target-python-version: ${{ matrix.python }} + testing-type: integration diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index f7ab9450cc..3c6776929d 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -5,9 +5,10 @@ name: "Code scanning - action" -on: +"on": schedule: - cron: '26 19 * * 1' + workflow_dispatch: permissions: contents: read @@ -22,40 +23,16 @@ jobs: runs-on: ubuntu-latest steps: - - name: Checkout repository - uses: actions/checkout@v3 - with: - # We must fetch at least the immediate parents so that if this is - # a pull request then we can checkout the head. - fetch-depth: 2 + - name: Checkout repository + uses: actions/checkout@v5 + with: + persist-credentials: false - # If this run was triggered by a pull request event, then checkout - # the head of the pull request instead of the merge commit. - - run: git checkout HEAD^2 - if: ${{ github.event_name == 'pull_request' }} + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v4 + with: + languages: python - # Initializes the CodeQL tools for scanning. 
- - name: Initialize CodeQL - uses: github/codeql-action/init@v2 - # Override language selection by uncommenting this and choosing your languages - # with: - # languages: go, javascript, csharp, python, cpp, java - - # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). - # If this step fails, then you should remove it and run the build manually (see below) - - name: Autobuild - uses: github/codeql-action/autobuild@v2 - - # ℹ️ Command-line programs to run using the OS shell. - # 📚 https://git.io/JvXDl - - # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines - # and modify them (or add more) to build your code if your project - # uses a compiled language - - #- run: | - # make bootstrap - # make release - - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v4 diff --git a/.github/workflows/nox.yml b/.github/workflows/nox.yml new file mode 100644 index 0000000000..81c6563811 --- /dev/null +++ b/.github/workflows/nox.yml @@ -0,0 +1,28 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +name: nox +'on': + push: + branches: + - main + - stable-* + pull_request: + # Run CI once per day (at 08:00 UTC) + schedule: + - cron: '0 8 * * *' + workflow_dispatch: + +jobs: + nox: + runs-on: ubuntu-latest + name: "Run extra sanity tests" + steps: + - name: Check out collection + uses: actions/checkout@v5 + with: + persist-credentials: false + - name: Run nox + uses: ansible-community/antsibull-nox@main diff --git a/.github/workflows/reuse.yml b/.github/workflows/reuse.yml deleted file mode 100644 index 8467668f10..0000000000 --- a/.github/workflows/reuse.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see 
LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -name: Verify REUSE - -on: - push: - branches: [main] - pull_request_target: - types: [opened, synchronize, reopened] - branches: [main] - # Run CI once per day (at 07:30 UTC) - schedule: - - cron: '30 7 * * *' - -jobs: - check: - permissions: - contents: read - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.pull_request.head.sha || '' }} - - - name: Install dependencies - run: | - pip install reuse - - - name: Check REUSE compliance - run: | - reuse lint diff --git a/.gitignore b/.gitignore index b7868a9e41..e427699798 100644 --- a/.gitignore +++ b/.gitignore @@ -383,6 +383,16 @@ cython_debug/ # option (not recommended) you can uncomment the following to ignore the entire idea folder. #.idea/ +### Python Patch ### +# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration +poetry.toml + +# ruff +.ruff_cache/ + +# LSP config files +pyrightconfig.json + ### Vim ### # Swap [._]*.s[a-v][a-z] @@ -482,6 +492,10 @@ tags # https://plugins.jetbrains.com/plugin/12206-codestream .idea/codestream.xml +# Azure Toolkit for IntelliJ plugin +# https://plugins.jetbrains.com/plugin/8053-azure-toolkit-for-intellij +.idea/**/azureSettings.xml + ### Windows ### # Windows thumbnail cache files Thumbs.db @@ -512,3 +526,8 @@ $RECYCLE.BIN/ # Integration tests cloud configs tests/integration/cloud-config-*.ini + + +# VSCode specific extensions +.vscode/settings.json +.ansible diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml deleted file mode 100644 index 7e3d19094b..0000000000 --- a/.pre-commit-config.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -repos: - - repo: 
https://github.com/pre-commit/pre-commit-hooks - rev: v4.0.1 - hooks: - - id: trailing-whitespace - - id: end-of-file-fixer - - id: mixed-line-ending - args: [--fix=lf] - - id: fix-encoding-pragma - - id: check-ast - - id: check-merge-conflict - - id: check-symlinks - - repo: https://github.com/pre-commit/pygrep-hooks - rev: v1.9.0 - hooks: - - id: rst-backticks - types: [file] - files: changelogs/fragments/.*\.(yml|yaml)$ diff --git a/.reuse/dep5 b/.reuse/dep5 deleted file mode 100644 index 0c3745ebf8..0000000000 --- a/.reuse/dep5 +++ /dev/null @@ -1,5 +0,0 @@ -Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ - -Files: changelogs/fragments/* -Copyright: Ansible Project -License: GPL-3.0-or-later diff --git a/.yamllint b/.yamllint new file mode 100644 index 0000000000..c10d86ab19 --- /dev/null +++ b/.yamllint @@ -0,0 +1,52 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +extends: default + +ignore: | + /changelogs/ + +rules: + line-length: + max: 1000 + level: error + document-start: disable + document-end: disable + truthy: + level: error + allowed-values: + - 'true' + - 'false' + indentation: + spaces: 2 + indent-sequences: true + key-duplicates: enable + trailing-spaces: enable + new-line-at-end-of-file: disable + hyphens: + max-spaces-after: 1 + empty-lines: + max: 2 + max-start: 0 + max-end: 0 + commas: + max-spaces-before: 0 + min-spaces-after: 1 + max-spaces-after: 1 + colons: + max-spaces-before: 0 + max-spaces-after: 1 + brackets: + min-spaces-inside: 0 + max-spaces-inside: 0 + braces: + min-spaces-inside: 0 + max-spaces-inside: 1 + octal-values: + forbid-implicit-octal: true + forbid-explicit-octal: true + comments: + min-spaces-from-content: 1 + comments-indentation: false diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000000..b35c52441b --- /dev/null 
+++ b/CHANGELOG.md @@ -0,0 +1,5 @@ +# Placeholder changelog + +This file is a placeholder; a version-specific `CHANGELOG-vX.md` will be generated during releases from fragments +under `changelogs/fragments`. On release branches once a release has been created, consult the branch's version-specific +file for changes that have occurred in that branch. diff --git a/tests/sanity/extra/aliases.json.license b/CHANGELOG.md.license similarity index 100% rename from tests/sanity/extra/aliases.json.license rename to CHANGELOG.md.license diff --git a/CHANGELOG.rst b/CHANGELOG.rst index b41f75654b..119e04e170 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,485 +1,6 @@ -=============================== -Community General Release Notes -=============================== +Placeholder changelog +===================== -.. contents:: Topics - -This changelog describes changes after version 5.0.0. - -v6.3.0 -====== - -Release Summary ---------------- - -Regular bugfix and feature release. - -Minor Changes -------------- - -- apache2_module - add module argument ``warn_mpm_absent`` to control whether warning are raised in some edge cases (https://github.com/ansible-collections/community.general/pull/5793). -- bitwarden lookup plugin - can now retrieve secrets from custom fields (https://github.com/ansible-collections/community.general/pull/5694). -- bitwarden lookup plugin - implement filtering results by ``collection_id`` parameter (https://github.com/ansible-collections/community.general/issues/5849). -- dig lookup plugin - support CAA record type (https://github.com/ansible-collections/community.general/pull/5913). -- gitlab_project - add ``builds_access_level``, ``container_registry_access_level`` and ``forking_access_level`` options (https://github.com/ansible-collections/community.general/pull/5706). -- gitlab_runner - add new boolean option ``access_level_on_creation``. It controls, whether the value of ``access_level`` is used for runner registration or not. 
The option ``access_level`` has been ignored on registration so far and was only used on updates (https://github.com/ansible-collections/community.general/issues/5907, https://github.com/ansible-collections/community.general/pull/5908). -- ilo_redfish_utils module utils - change implementation of DNS Server IP and NTP Server IP update (https://github.com/ansible-collections/community.general/pull/5804). -- ipa_group - allow to add and remove external users with the ``external_user`` option (https://github.com/ansible-collections/community.general/pull/5897). -- iptables_state - minor refactoring within the module (https://github.com/ansible-collections/community.general/pull/5844). -- one_vm - add a new ``updateconf`` option which implements the ``one.vm.updateconf`` API call (https://github.com/ansible-collections/community.general/pull/5812). -- opkg - refactored module to use ``CmdRunner`` for executing ``opkg`` (https://github.com/ansible-collections/community.general/pull/5718). -- redhat_subscription - adds ``token`` parameter for subscription-manager authentication using Red Hat API token (https://github.com/ansible-collections/community.general/pull/5725). -- snap - minor refactor when executing module (https://github.com/ansible-collections/community.general/pull/5773). -- snap_alias - refactored module to use ``CmdRunner`` to execute ``snap`` (https://github.com/ansible-collections/community.general/pull/5486). -- sudoers - add ``setenv`` parameters to support passing environment variables via sudo. 
(https://github.com/ansible-collections/community.general/pull/5883) - -Breaking Changes / Porting Guide --------------------------------- - -- ModuleHelper module utils - when the module sets output variables named ``msg``, ``exception``, ``output``, ``vars``, or ``changed``, the actual output will prefix those names with ``_`` (underscore symbol) only when they clash with output variables generated by ModuleHelper itself, which only occurs when handling exceptions. Please note that this breaking change does not require a new major release since before this release, it was not possible to add such variables to the output `due to a bug `__ (https://github.com/ansible-collections/community.general/pull/5765). - -Deprecated Features -------------------- - -- consul - deprecate using parameters unused for ``state=absent`` (https://github.com/ansible-collections/community.general/pull/5772). -- gitlab_runner - the default of the new option ``access_level_on_creation`` will change from ``false`` to ``true`` in community.general 7.0.0. This will cause ``access_level`` to be used during runner registration as well, and not only during updates (https://github.com/ansible-collections/community.general/pull/5908). - -Bugfixes --------- - -- ModuleHelper - fix bug when adjusting the name of reserved output variables (https://github.com/ansible-collections/community.general/pull/5755). -- alternatives - support subcommands on Fedora 37, which uses ``follower`` instead of ``slave`` (https://github.com/ansible-collections/community.general/pull/5794). -- bitwarden lookup plugin - clarify what to do, if the bitwarden vault is not unlocked (https://github.com/ansible-collections/community.general/pull/5811). -- dig lookup plugin - correctly handle DNSKEY record type's ``algorithm`` field (https://github.com/ansible-collections/community.general/pull/5914). 
-- gem - fix force parameter not being passed to gem command when uninstalling (https://github.com/ansible-collections/community.general/pull/5822). -- gem - fix hang due to interactive prompt for confirmation on specific version uninstall (https://github.com/ansible-collections/community.general/pull/5751). -- gitlab_deploy_key - also update ``title`` and not just ``can_push`` (https://github.com/ansible-collections/community.general/pull/5888). -- keycloak_user_federation - fixes federation creation issue. When a new federation was created and at the same time a default / standard mapper was also changed / updated the creation process failed as a bad None set variable led to a bad malformed url request (https://github.com/ansible-collections/community.general/pull/5750). -- keycloak_user_federation - fixes idempotency detection issues. In some cases the module could fail to properly detect already existing user federations because of a buggy seemingly superflous extra query parameter (https://github.com/ansible-collections/community.general/pull/5732). -- loganalytics callback plugin - adjust type of callback to ``notification``, it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761). -- logdna callback plugin - adjust type of callback to ``notification``, it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761). -- logstash callback plugin - adjust type of callback to ``notification``, it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761). -- nsupdate - fix zone lookup. The SOA record for an existing zone is returned as an answer RR and not as an authority RR (https://github.com/ansible-collections/community.general/issues/5817, https://github.com/ansible-collections/community.general/pull/5818). 
-- proxmox_disk - fixed issue with read timeout on import action (https://github.com/ansible-collections/community.general/pull/5803). -- redfish_utils - removed basic auth HTTP header when performing a GET on the service root resource and when performing a POST to the session collection (https://github.com/ansible-collections/community.general/issues/5886). -- splunk callback plugin - adjust type of callback to ``notification``, it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761). -- sumologic callback plugin - adjust type of callback to ``notification``, it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761). -- syslog_json callback plugin - adjust type of callback to ``notification``, it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761). -- terraform - fix ``current`` workspace never getting appended to the ``all`` key in the ``workspace_ctf`` object (https://github.com/ansible-collections/community.general/pull/5735). -- terraform - fix ``terraform init`` failure when there are multiple workspaces on the remote backend and when ``default`` workspace is missing by setting ``TF_WORKSPACE`` environmental variable to the value of ``workspace`` when used (https://github.com/ansible-collections/community.general/pull/5735). -- terraform module - disable ANSI escape sequences during validation phase (https://github.com/ansible-collections/community.general/pull/5843). -- xml - fixed a bug where empty ``children`` list would not be set (https://github.com/ansible-collections/community.general/pull/5808). 
- -New Modules ------------ - -- ocapi_command - Manages Out-Of-Band controllers using Open Composable API (OCAPI) -- ocapi_info - Manages Out-Of-Band controllers using Open Composable API (OCAPI) - -v6.2.0 -====== - -Release Summary ---------------- - -Regular bugfix and feature release. - -Minor Changes -------------- - -- opkg - allow installing a package in a certain version (https://github.com/ansible-collections/community.general/pull/5688). -- proxmox - added new module parameter ``tags`` for use with PVE 7+ (https://github.com/ansible-collections/community.general/pull/5714). -- puppet - refactored module to use ``CmdRunner`` for executing ``puppet`` (https://github.com/ansible-collections/community.general/pull/5612). -- redhat_subscription - add a ``server_proxy_scheme`` parameter to configure the scheme for the proxy server (https://github.com/ansible-collections/community.general/pull/5662). -- ssh_config - refactor code to module util to fix sanity check (https://github.com/ansible-collections/community.general/pull/5720). -- sudoers - adds ``host`` parameter for setting hostname restrictions in sudoers rules (https://github.com/ansible-collections/community.general/issues/5702). - -Deprecated Features -------------------- - -- manageiq_policies - deprecate ``state=list`` in favour of using ``community.general.manageiq_policies_info`` (https://github.com/ansible-collections/community.general/pull/5721). -- rax - module relies on deprecates library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). -- rax_cbs - module relies on deprecates library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). 
-- rax_cbs_attachments - module relies on deprecates library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). -- rax_cdb - module relies on deprecates library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). -- rax_cdb_database - module relies on deprecates library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). -- rax_cdb_user - module relies on deprecates library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). -- rax_clb - module relies on deprecates library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). -- rax_clb_nodes - module relies on deprecates library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). -- rax_clb_ssl - module relies on deprecates library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). -- rax_dns - module relies on deprecates library ``pyrax``. 
Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). -- rax_dns_record - module relies on deprecates library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). -- rax_facts - module relies on deprecates library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). -- rax_files - module relies on deprecates library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). -- rax_files_objects - module relies on deprecates library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). -- rax_identity - module relies on deprecates library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). -- rax_keypair - module relies on deprecates library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). -- rax_meta - module relies on deprecates library ``pyrax``. 
Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). -- rax_mon_alarm - module relies on deprecates library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). -- rax_mon_check - module relies on deprecates library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). -- rax_mon_entity - module relies on deprecates library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). -- rax_mon_notification - module relies on deprecates library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). -- rax_mon_notification_plan - module relies on deprecates library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). -- rax_network - module relies on deprecates library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). -- rax_queue - module relies on deprecates library ``pyrax``. 
Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). -- rax_scaling_group - module relies on deprecates library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). -- rax_scaling_policy - module relies on deprecates library ``pyrax``. Unless maintainers step up to work on the module, it will be marked as deprecated in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). - -Bugfixes --------- - -- ansible_galaxy_install - set default to raise exception if command's return code is different from zero (https://github.com/ansible-collections/community.general/pull/5680). -- ansible_galaxy_install - try ``C.UTF-8`` and then fall back to ``en_US.UTF-8`` before failing (https://github.com/ansible-collections/community.general/pull/5680). -- gitlab_group_variables - fix dropping variables accidentally when GitLab introduced new properties (https://github.com/ansible-collections/community.general/pull/5667). -- gitlab_project_variables - fix dropping variables accidentally when GitLab introduced new properties (https://github.com/ansible-collections/community.general/pull/5667). -- lxc_container - fix the arguments of the lxc command which broke the creation and cloning of containers (https://github.com/ansible-collections/community.general/issues/5578). -- opkg - fix issue that ``force=reinstall`` would not reinstall an existing package (https://github.com/ansible-collections/community.general/pull/5705). 
-- proxmox_disk - fixed possible issues with redundant ``vmid`` parameter (https://github.com/ansible-collections/community.general/issues/5492, https://github.com/ansible-collections/community.general/pull/5672). -- proxmox_nic - fixed possible issues with redundant ``vmid`` parameter (https://github.com/ansible-collections/community.general/issues/5492, https://github.com/ansible-collections/community.general/pull/5672). -- unixy callback plugin - fix typo introduced when updating to use Ansible's configuration manager for handling options (https://github.com/ansible-collections/community.general/issues/5600). - -v6.1.0 -====== - -Release Summary ---------------- - -Regular bugfix and feature release. - -Minor Changes -------------- - -- cmd_runner module utils - ``cmd_runner_fmt.as_bool()`` can now take an extra parameter to format when value is false (https://github.com/ansible-collections/community.general/pull/5647). -- gconftool2 - refactor using ``ModuleHelper`` and ``CmdRunner`` (https://github.com/ansible-collections/community.general/pull/5545). -- java_certs - add more detailed error output when extracting certificate from PKCS12 fails (https://github.com/ansible-collections/community.general/pull/5550). -- jenkins_plugin - refactor code to module util to fix sanity check (https://github.com/ansible-collections/community.general/pull/5565). -- lxd_project - refactored code out to module utils to clear sanity check (https://github.com/ansible-collections/community.general/pull/5549). -- nmap inventory plugin - add new options ``udp_scan``, ``icmp_timestamp``, and ``dns_resolve`` for different types of scans (https://github.com/ansible-collections/community.general/pull/5566). -- rax_scaling_group - refactored out code to the ``rax`` module utils to clear the sanity check (https://github.com/ansible-collections/community.general/pull/5563). 
-- redfish_command - add ``PerformRequestedOperations`` command to perform any operations necessary to continue the update flow (https://github.com/ansible-collections/community.general/issues/4276). -- redfish_command - add ``update_apply_time`` to ``SimpleUpdate`` command (https://github.com/ansible-collections/community.general/issues/3910). -- redfish_command - add ``update_status`` to output of ``SimpleUpdate`` command to allow a user monitor the update in progress (https://github.com/ansible-collections/community.general/issues/4276). -- redfish_info - add ``GetUpdateStatus`` command to check the progress of a previous update request (https://github.com/ansible-collections/community.general/issues/4276). -- redfish_utils module utils - added PUT (``put_request()``) functionality (https://github.com/ansible-collections/community.general/pull/5490). -- slack - add option ``prepend_hash`` which allows to control whether a ``#`` is prepended to ``channel_id``. The current behavior (value ``auto``) is to prepend ``#`` unless some specific prefixes are found. That list of prefixes is incomplete, and there does not seem to exist a documented condition on when exactly ``#`` must not be prepended. We recommend to explicitly set ``prepend_hash=always`` or ``prepend_hash=never`` to avoid any ambiguity (https://github.com/ansible-collections/community.general/pull/5629). -- spotinst_aws_elastigroup - add ``elements`` attribute when missing in ``list`` parameters (https://github.com/ansible-collections/community.general/pull/5553). -- ssh_config - add ``host_key_algorithms`` option (https://github.com/ansible-collections/community.general/pull/5605). -- udm_share - added ``elements`` attribute to ``list`` type parameters (https://github.com/ansible-collections/community.general/pull/5557). -- udm_user - add ``elements`` attribute when missing in ``list`` parameters (https://github.com/ansible-collections/community.general/pull/5559). 
- -Deprecated Features -------------------- - -- The ``sap`` modules ``sapcar_extract``, ``sap_task_list_execute``, and ``hana_query``, will be removed from this collection in community.general 7.0.0 and replaced with redirects to ``community.sap_libs``. If you want to continue using these modules, make sure to also install ``community.sap_libs`` (it is part of the Ansible package) (https://github.com/ansible-collections/community.general/pull/5614). - -Bugfixes --------- - -- chroot connection plugin - add ``inventory_hostname`` to vars under ``remote_addr``. This is needed for compatibility with ansible-core 2.13 (https://github.com/ansible-collections/community.general/pull/5570). -- cmd_runner module utils - fixed bug when handling default cases in ``cmd_runner_fmt.as_map()`` (https://github.com/ansible-collections/community.general/pull/5538). -- cmd_runner module utils - formatting arguments ``cmd_runner_fmt.as_fixed()`` was expecting an non-existing argument (https://github.com/ansible-collections/community.general/pull/5538). -- keycloak_client_rolemapping - calculate ``proposed`` and ``after`` return values properly (https://github.com/ansible-collections/community.general/pull/5619). -- keycloak_client_rolemapping - remove only listed mappings with ``state=absent`` (https://github.com/ansible-collections/community.general/pull/5619). -- proxmox inventory plugin - fix bug while templating when using templates for the ``url``, ``user``, ``password``, ``token_id``, or ``token_secret`` options (https://github.com/ansible-collections/community.general/pull/5640). -- proxmox inventory plugin - handle tags delimited by semicolon instead of comma, which happens from Proxmox 7.3 on (https://github.com/ansible-collections/community.general/pull/5602). 
-- redhat_subscription - do not ignore ``consumer_name`` and other variables if ``activationkey`` is specified (https://github.com/ansible-collections/community.general/issues/3486, https://github.com/ansible-collections/community.general/pull/5627). -- redhat_subscription - do not pass arguments to ``subscription-manager register`` for things already configured; now a specified ``rhsm_baseurl`` is properly set for subscription-manager (https://github.com/ansible-collections/community.general/pull/5583). -- unixy callback plugin - fix plugin to work with ansible-core 2.14 by using Ansible's configuration manager for handling options (https://github.com/ansible-collections/community.general/issues/5600). -- vdo - now uses ``yaml.safe_load()`` to parse command output instead of the deprecated ``yaml.load()`` which is potentially unsafe. Using ``yaml.load()`` without explicitely setting a ``Loader=`` is also an error in pyYAML 6.0 (https://github.com/ansible-collections/community.general/pull/5632). -- vmadm - fix for index out of range error in ``get_vm_uuid`` (https://github.com/ansible-collections/community.general/pull/5628). - -New Modules ------------ - -- gitlab_project_badge - Manage project badges on GitLab Server -- keycloak_clientsecret_info - Retrieve client secret via Keycloak API -- keycloak_clientsecret_regenerate - Regenerate Keycloak client secret via Keycloak API - -v6.0.1 -====== - -Release Summary ---------------- - -Bugfix release for Ansible 7.0.0. - -Bugfixes --------- - -- dependent lookup plugin - avoid warning on deprecated parameter for ``Templar.template()`` (https://github.com/ansible-collections/community.general/pull/5543). -- jenkins_build - fix the logical flaw when deleting a Jenkins build (https://github.com/ansible-collections/community.general/pull/5514). -- one_vm - avoid splitting labels that are ``None`` (https://github.com/ansible-collections/community.general/pull/5489). 
-- onepassword_raw - add missing parameter to plugin documentation (https://github.com/ansible-collections/community.general/issues/5506). -- proxmox_disk - avoid duplicate ``vmid`` reference (https://github.com/ansible-collections/community.general/issues/5492, https://github.com/ansible-collections/community.general/pull/5493). - -v6.0.0 -====== - -Release Summary ---------------- - -New major release of community.general with lots of bugfixes, new features, some removed deprecated features, and some other breaking changes. Please check the coresponding sections of the changelog for more details. - -Major Changes -------------- - -- The internal structure of the collection was changed for modules and action plugins. These no longer live in a directory hierarchy ordered by topic, but instead are now all in a single (flat) directory. This has no impact on users *assuming they did not use internal FQCNs*. These will still work, but result in deprecation warnings. They were never officially supported and thus the redirects are kept as a courtsey, and this is not labelled as a breaking change. Note that for example the Ansible VScode plugin started recommending these internal names. If you followed its recommendation, you will now have to change back to the short names to avoid deprecation warnings, and potential errors in the future as these redirects will be removed in community.general 9.0.0 (https://github.com/ansible-collections/community.general/pull/5461). -- newrelic_deployment - removed New Relic v1 API, added support for v2 API (https://github.com/ansible-collections/community.general/pull/5341). - -Minor Changes -------------- - -- Added MIT license as ``LICENSES/MIT.txt`` for tests/unit/plugins/modules/packaging/language/test_gem.py (https://github.com/ansible-collections/community.general/pull/5065). 
-- All software licenses are now in the ``LICENSES/`` directory of the collection root (https://github.com/ansible-collections/community.general/pull/5065, https://github.com/ansible-collections/community.general/pull/5079, https://github.com/ansible-collections/community.general/pull/5080, https://github.com/ansible-collections/community.general/pull/5083, https://github.com/ansible-collections/community.general/pull/5087, https://github.com/ansible-collections/community.general/pull/5095, https://github.com/ansible-collections/community.general/pull/5098, https://github.com/ansible-collections/community.general/pull/5106). -- ModuleHelper module utils - added property ``verbosity`` to base class (https://github.com/ansible-collections/community.general/pull/5035). -- ModuleHelper module utils - improved ``ModuleHelperException``, using ``to_native()`` for the exception message (https://github.com/ansible-collections/community.general/pull/4755). -- The collection repository conforms to the `REUSE specification `__ except for the changelog fragments (https://github.com/ansible-collections/community.general/pull/5138). -- ali_instance - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5240). -- ali_instance_info - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5240). -- alternatives - add ``state=absent`` to be able to remove an alternative (https://github.com/ansible-collections/community.general/pull/4654). -- alternatives - add ``subcommands`` parameter (https://github.com/ansible-collections/community.general/pull/4654). -- ansible_galaxy_install - minor refactoring using latest ``ModuleHelper`` updates (https://github.com/ansible-collections/community.general/pull/4752). 
-- ansible_galaxy_install - refactored module to use ``CmdRunner`` to execute ``ansible-galaxy`` (https://github.com/ansible-collections/community.general/pull/5477). -- apk - add ``world`` parameter for supporting a custom world file (https://github.com/ansible-collections/community.general/pull/4976). -- bitwarden lookup plugin - add option ``search`` to search for other attributes than name (https://github.com/ansible-collections/community.general/pull/5297). -- cartesian lookup plugin - start using Ansible's configuration manager to parse options (https://github.com/ansible-collections/community.general/pull/5440). -- cmd_runner module util - added parameters ``check_mode_skip`` and ``check_mode_return`` to ``CmdRunner.context()``, so that the command is not executed when ``check_mode=True`` (https://github.com/ansible-collections/community.general/pull/4736). -- cmd_runner module utils - add ``__call__`` method to invoke context (https://github.com/ansible-collections/community.general/pull/4791). -- consul - adds ``ttl`` parameter for session (https://github.com/ansible-collections/community.general/pull/4996). -- consul - minor refactoring (https://github.com/ansible-collections/community.general/pull/5367). -- consul_session - adds ``token`` parameter for session (https://github.com/ansible-collections/community.general/pull/5193). -- cpanm - refactored module to use ``CmdRunner`` to execute ``cpanm`` (https://github.com/ansible-collections/community.general/pull/5485). -- cpanm - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived modules (https://github.com/ansible-collections/community.general/pull/4674). -- credstash lookup plugin - start using Ansible's configuration manager to parse options (https://github.com/ansible-collections/community.general/pull/5440). -- dependent lookup plugin - start using Ansible's configuration manager to parse options (https://github.com/ansible-collections/community.general/pull/5440). 
-- dig lookup plugin - add option ``fail_on_error`` to allow stopping execution on lookup failures (https://github.com/ansible-collections/community.general/pull/4973). -- dig lookup plugin - start using Ansible's configuration manager to parse options. All documented options can now also be passed as lookup parameters (https://github.com/ansible-collections/community.general/pull/5440). -- dnstxt lookup plugin - start using Ansible's configuration manager to parse options (https://github.com/ansible-collections/community.general/pull/5440). -- filetree lookup plugin - start using Ansible's configuration manager to parse options (https://github.com/ansible-collections/community.general/pull/5440). -- flattened lookup plugin - start using Ansible's configuration manager to parse options (https://github.com/ansible-collections/community.general/pull/5440). -- gitlab module util - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259). -- gitlab_branch - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259). -- gitlab_deploy_key - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259). -- gitlab_group - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259). -- gitlab_group_members - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259). -- gitlab_group_variable - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259). -- gitlab_hook - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259). -- gitlab_hook - minor refactoring (https://github.com/ansible-collections/community.general/pull/5271). 
-- gitlab_project - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259). -- gitlab_project_members - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259). -- gitlab_project_variable - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259). -- gitlab_protected_branch - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259). -- gitlab_runner - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259). -- gitlab_user - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259). -- hiera lookup plugin - start using Ansible's configuration manager to parse options. The Hiera executable and config file can now also be passed as lookup parameters (https://github.com/ansible-collections/community.general/pull/5440). -- homebrew, homebrew_tap - added Homebrew on Linux path to defaults (https://github.com/ansible-collections/community.general/pull/5241). -- hponcfg - refactored module to use ``CmdRunner`` to execute ``hponcfg`` (https://github.com/ansible-collections/community.general/pull/5483). -- keycloak_* modules - add ``http_agent`` parameter with default value ``Ansible`` (https://github.com/ansible-collections/community.general/issues/5023). -- keyring lookup plugin - start using Ansible's configuration manager to parse options (https://github.com/ansible-collections/community.general/pull/5440). -- lastpass - use config manager for handling plugin options (https://github.com/ansible-collections/community.general/pull/5022). 
-- ldap_attrs - allow for DNs to have ``{x}`` prefix on first RDN (https://github.com/ansible-collections/community.general/issues/977, https://github.com/ansible-collections/community.general/pull/5450). -- linode inventory plugin - simplify option handling (https://github.com/ansible-collections/community.general/pull/5438). -- listen_ports_facts - add new ``include_non_listening`` option which adds ``-a`` option to ``netstat`` and ``ss``. This shows both listening and non-listening (for TCP this means established connections) sockets, and returns ``state`` and ``foreign_address`` (https://github.com/ansible-collections/community.general/issues/4762, https://github.com/ansible-collections/community.general/pull/4953). -- lmdb_kv lookup plugin - start using Ansible's configuration manager to parse options (https://github.com/ansible-collections/community.general/pull/5440). -- lxc_container - minor refactoring (https://github.com/ansible-collections/community.general/pull/5358). -- machinectl become plugin - can now be used with a password from another user than root, if a polkit rule is present (https://github.com/ansible-collections/community.general/pull/4849). -- machinectl become plugin - combine the success command when building the become command to be consistent with other become plugins (https://github.com/ansible-collections/community.general/pull/5287). -- manifold lookup plugin - start using Ansible's configuration manager to parse options (https://github.com/ansible-collections/community.general/pull/5440). -- maven_artifact - add a new ``unredirected_headers`` option that can be used with ansible-core 2.12 and above. The default value is to not use ``Authorization`` and ``Cookie`` headers on redirects for security reasons. With ansible-core 2.11, all headers are still passed on for redirects (https://github.com/ansible-collections/community.general/pull/4812). 
-- mksysb - refactored module to use ``CmdRunner`` to execute ``mksysb`` (https://github.com/ansible-collections/community.general/pull/5484). -- mksysb - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived modules (https://github.com/ansible-collections/community.general/pull/4674). -- nagios - minor refactoring on parameter validation for different actions (https://github.com/ansible-collections/community.general/pull/5239). -- netcup_dnsapi - add ``timeout`` parameter (https://github.com/ansible-collections/community.general/pull/5301). -- nmcli - add ``transport_mode`` configuration for Infiniband devices (https://github.com/ansible-collections/community.general/pull/5361). -- nmcli - add bond option ``xmit_hash_policy`` to bond options (https://github.com/ansible-collections/community.general/issues/5148). -- nmcli - adds ``vpn`` type and parameter for supporting VPN with service type L2TP and PPTP (https://github.com/ansible-collections/community.general/pull/4746). -- nmcli - honor IP options for VPNs (https://github.com/ansible-collections/community.general/pull/5228). -- onepassword - support version 2 of the OnePassword CLI (https://github.com/ansible-collections/community.general/pull/4728) -- opentelemetry callback plugin - allow configuring opentelementry callback via config file (https://github.com/ansible-collections/community.general/pull/4916). -- opentelemetry callback plugin - send logs. This can be disabled by setting ``disable_logs=false`` (https://github.com/ansible-collections/community.general/pull/4175). -- pacman - added parameters ``reason`` and ``reason_for`` to set/change the install reason of packages (https://github.com/ansible-collections/community.general/pull/4956). -- passwordstore lookup plugin - allow options to be passed lookup options instead of being part of the term strings (https://github.com/ansible-collections/community.general/pull/5444). 
-- passwordstore lookup plugin - allow using alternative password managers by detecting wrapper scripts, allow explicit configuration of pass and gopass backends (https://github.com/ansible-collections/community.general/issues/4766). -- passwordstore lookup plugin - improve error messages to include stderr (https://github.com/ansible-collections/community.general/pull/5436) -- pipx - added state ``latest`` to the module (https://github.com/ansible-collections/community.general/pull/5105). -- pipx - changed implementation to use ``cmd_runner`` (https://github.com/ansible-collections/community.general/pull/5085). -- pipx - module fails faster when ``name`` is missing for states ``upgrade`` and ``reinstall`` (https://github.com/ansible-collections/community.general/pull/5100). -- pipx - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived modules (https://github.com/ansible-collections/community.general/pull/4674). -- pipx module utils - created new module util ``pipx`` providing a ``cmd_runner`` specific for the ``pipx`` module (https://github.com/ansible-collections/community.general/pull/5085). -- portage - add knobs for Portage's ``--backtrack`` and ``--with-bdeps`` options (https://github.com/ansible-collections/community.general/pull/5349). -- portage - use Portage's python module instead of calling gentoolkit-provided program in shell (https://github.com/ansible-collections/community.general/pull/5349). -- proxmox inventory plugin - added new flag ``qemu_extended_statuses`` and new groups ``prelaunch``, ``paused``. They will be populated only when ``want_facts=true``, ``qemu_extended_statuses=true`` and only for ``QEMU`` machines (https://github.com/ansible-collections/community.general/pull/4723). -- proxmox inventory plugin - simplify option handling code (https://github.com/ansible-collections/community.general/pull/5437). 
-- proxmox module utils, the proxmox* modules - add ``api_task_ok`` helper to standardize API task status checks across all proxmox modules (https://github.com/ansible-collections/community.general/pull/5274). -- proxmox_kvm - allow ``agent`` argument to be a string (https://github.com/ansible-collections/community.general/pull/5107). -- proxmox_snap - add ``unbind`` param to support snapshotting containers with configured mountpoints (https://github.com/ansible-collections/community.general/pull/5274). -- puppet - adds ``confdir`` parameter to configure a custom confir location (https://github.com/ansible-collections/community.general/pull/4740). -- redfish - added new command GetVirtualMedia, VirtualMediaInsert and VirtualMediaEject to Systems category due to Redfish spec changes the virtualMedia resource location from Manager to System (https://github.com/ansible-collections/community.general/pull/5124). -- redfish_config - add ``SetSessionService`` to set default session timeout policy (https://github.com/ansible-collections/community.general/issues/5008). -- redfish_info - add ``GetManagerInventory`` to report list of Manager inventory information (https://github.com/ansible-collections/community.general/issues/4899). -- seport - added new argument ``local`` (https://github.com/ansible-collections/community.general/pull/5203) -- snap - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived modules (https://github.com/ansible-collections/community.general/pull/4674). -- sudoers - will attempt to validate the proposed sudoers rule using visudo if available, optionally skipped, or required (https://github.com/ansible-collections/community.general/pull/4794, https://github.com/ansible-collections/community.general/issues/4745). -- terraform - adds capability to handle complex variable structures for ``variables`` parameter in the module. 
This must be enabled with the new ``complex_vars`` parameter (https://github.com/ansible-collections/community.general/pull/4797). -- terraform - run ``terraform init`` with ``-no-color`` not to mess up the stdout of the task (https://github.com/ansible-collections/community.general/pull/5147). -- wdc_redfish_command - add ``IndicatorLedOn`` and ``IndicatorLedOff`` commands for ``Chassis`` category (https://github.com/ansible-collections/community.general/pull/5059). -- wdc_redfish_command - add ``PowerModeLow`` and ``PowerModeNormal`` commands for ``Chassis`` category (https://github.com/ansible-collections/community.general/pull/5145). -- xfconf - add ``stdout``, ``stderr`` and ``cmd`` to the module results (https://github.com/ansible-collections/community.general/pull/5037). -- xfconf - changed implementation to use ``cmd_runner`` (https://github.com/ansible-collections/community.general/pull/4776). -- xfconf - use ``do_raise()`` instead of defining custom exception class (https://github.com/ansible-collections/community.general/pull/4975). -- xfconf - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived modules (https://github.com/ansible-collections/community.general/pull/4674). -- xfconf module utils - created new module util ``xfconf`` providing a ``cmd_runner`` specific for ``xfconf`` modules (https://github.com/ansible-collections/community.general/pull/4776). -- xfconf_info - changed implementation to use ``cmd_runner`` (https://github.com/ansible-collections/community.general/pull/4776). -- xfconf_info - use ``do_raise()`` instead of defining custom exception class (https://github.com/ansible-collections/community.general/pull/4975). -- znode - possibility to use ZooKeeper ACL authentication (https://github.com/ansible-collections/community.general/pull/5306). 
- -Breaking Changes / Porting Guide --------------------------------- - -- newrelic_deployment - ``revision`` is required for v2 API (https://github.com/ansible-collections/community.general/pull/5341). -- scaleway_container_registry_info - no longer replace ``secret_environment_variables`` in the output by ``SENSITIVE_VALUE`` (https://github.com/ansible-collections/community.general/pull/5497). - -Deprecated Features -------------------- - -- ArgFormat module utils - deprecated along ``CmdMixin``, in favor of the ``cmd_runner_fmt`` module util (https://github.com/ansible-collections/community.general/pull/5370). -- CmdMixin module utils - deprecated in favor of the ``CmdRunner`` module util (https://github.com/ansible-collections/community.general/pull/5370). -- CmdModuleHelper module utils - deprecated in favor of the ``CmdRunner`` module util (https://github.com/ansible-collections/community.general/pull/5370). -- CmdStateModuleHelper module utils - deprecated in favor of the ``CmdRunner`` module util (https://github.com/ansible-collections/community.general/pull/5370). -- cmd_runner module utils - deprecated ``fmt`` in favour of ``cmd_runner_fmt`` as the parameter format object (https://github.com/ansible-collections/community.general/pull/4777). -- django_manage - support for Django releases older than 4.1 has been deprecated and will be removed in community.general 9.0.0 (https://github.com/ansible-collections/community.general/pull/5400). -- django_manage - support for the commands ``cleanup``, ``syncdb`` and ``validate`` that have been deprecated in Django long time ago will be removed in community.general 9.0.0 (https://github.com/ansible-collections/community.general/pull/5400). -- django_manage - the behavior of "creating the virtual environment when missing" is being deprecated and will be removed in community.general version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5405). 
-- gconftool2 - deprecates ``state=get`` in favor of using the module ``gconftool2_info`` (https://github.com/ansible-collections/community.general/pull/4778). -- lxc_container - the module will no longer make any effort to support Python 2 (https://github.com/ansible-collections/community.general/pull/5304). -- newrelic_deployment - ``appname`` and ``environment`` are no longer valid options in the v2 API. They will be removed in community.general 7.0.0 (https://github.com/ansible-collections/community.general/pull/5341). -- proxmox - deprecated the current ``unprivileged`` default value, will be changed to ``true`` in community.general 7.0.0 (https://github.com/pull/5224). -- xfconf - deprecated parameter ``disable_facts``, as since version 4.0.0 it only allows value ``true`` (https://github.com/ansible-collections/community.general/pull/4520). - -Removed Features (previously deprecated) ----------------------------------------- - -- bitbucket* modules - ``username`` is no longer an alias of ``workspace``, but of ``user`` (https://github.com/ansible-collections/community.general/pull/5326). -- gem - the default of the ``norc`` option changed from ``false`` to ``true`` (https://github.com/ansible-collections/community.general/pull/5326). -- gitlab_group_members - ``gitlab_group`` must now always contain the full path, and no longer just the name or path (https://github.com/ansible-collections/community.general/pull/5326). -- keycloak_authentication - the return value ``flow`` has been removed. Use ``end_state`` instead (https://github.com/ansible-collections/community.general/pull/5326). -- keycloak_group - the return value ``group`` has been removed. Use ``end_state`` instead (https://github.com/ansible-collections/community.general/pull/5326). -- lxd_container - the default of the ``ignore_volatile_options`` option changed from ``true`` to ``false`` (https://github.com/ansible-collections/community.general/pull/5326). 
-- mail callback plugin - the ``sender`` option is now required (https://github.com/ansible-collections/community.general/pull/5326). -- module_helper module utils - remove the ``VarDict`` attribute from ``ModuleHelper``. Import ``VarDict`` from ``ansible_collections.community.general.plugins.module_utils.mh.mixins.vars`` instead (https://github.com/ansible-collections/community.general/pull/5326). -- proxmox inventory plugin - the default of the ``want_proxmox_nodes_ansible_host`` option changed from ``true`` to ``false`` (https://github.com/ansible-collections/community.general/pull/5326). -- vmadm - the ``debug`` option has been removed. It was not used anyway (https://github.com/ansible-collections/community.general/pull/5326). - -Bugfixes --------- - -- Include ``PSF-license.txt`` file for ``plugins/module_utils/_mount.py``. -- Include ``simplified_bsd.txt`` license file for various module utils, the ``lxca_common`` docs fragment, and the ``utm_utils`` unit tests. -- alternatives - do not set the priority if the priority was not set by the user (https://github.com/ansible-collections/community.general/pull/4810). -- alternatives - only pass subcommands when they are specified as module arguments (https://github.com/ansible-collections/community.general/issues/4803, https://github.com/ansible-collections/community.general/issues/4804, https://github.com/ansible-collections/community.general/pull/4836). -- alternatives - when ``subcommands`` is specified, ``link`` must be given for every subcommand. This was already mentioned in the documentation, but not enforced by the code (https://github.com/ansible-collections/community.general/pull/4836). -- apache2_mod_proxy - avoid crash when reporting inability to parse balancer_member_page HTML caused by using an undefined variable in the error message (https://github.com/ansible-collections/community.general/pull/5111). 
-- archive - avoid crash when ``lzma`` is not present and ``format`` is not ``xz`` (https://github.com/ansible-collections/community.general/pull/5393). -- cmd_runner module utils - fix bug caused by using the ``command`` variable instead of ``self.command`` when looking for binary path (https://github.com/ansible-collections/community.general/pull/4903). -- consul - fixed bug introduced in PR 4590 (https://github.com/ansible-collections/community.general/issues/4680). -- credstash lookup plugin - pass plugin options to credstash for all terms, not just for the first (https://github.com/ansible-collections/community.general/pull/5440). -- dig lookup plugin - add option to return empty result without empty strings, and return empty list instead of ``NXDOMAIN`` (https://github.com/ansible-collections/community.general/pull/5439, https://github.com/ansible-collections/community.general/issues/5428). -- dig lookup plugin - fix evaluation of falsy values for boolean parameters ``fail_on_error`` and ``retry_servfail`` (https://github.com/ansible-collections/community.general/pull/5129). -- dnsimple_info - correctly report missing library as ``requests`` and not ``another_library`` (https://github.com/ansible-collections/community.general/pull/5111). -- dnstxt lookup plugin - add option to return empty result without empty strings, and return empty list instead of ``NXDOMAIN`` (https://github.com/ansible-collections/community.general/pull/5457, https://github.com/ansible-collections/community.general/issues/5428). -- dsv lookup plugin - do not ignore the ``tld`` parameter (https://github.com/ansible-collections/community.general/pull/4911). -- filesystem - handle ``fatresize --info`` output lines without ``:`` (https://github.com/ansible-collections/community.general/pull/4700). -- filesystem - improve error messages when output cannot be parsed by including newlines in escaped form (https://github.com/ansible-collections/community.general/pull/4700). 
-- funcd connection plugin - fix signature of ``exec_command`` (https://github.com/ansible-collections/community.general/pull/5111). -- ini_file - minor refactor fixing a python lint error (https://github.com/ansible-collections/community.general/pull/5307). -- iso_create - the module somtimes failed to add folders for Joliet and UDF formats (https://github.com/ansible-collections/community.general/issues/5275). -- keycloak_realm - fix default groups and roles (https://github.com/ansible-collections/community.general/issues/4241). -- keyring_info - fix the result from the keyring library never getting returned (https://github.com/ansible-collections/community.general/pull/4964). -- ldap_attrs - fix bug which caused a ``Bad search filter`` error. The error was occuring when the ldap attribute value contained special characters such as ``(`` or ``*`` (https://github.com/ansible-collections/community.general/issues/5434, https://github.com/ansible-collections/community.general/pull/5435). -- ldap_attrs - fix ordering issue by ignoring the ``{x}`` prefix on attribute values (https://github.com/ansible-collections/community.general/issues/977, https://github.com/ansible-collections/community.general/pull/5385). -- listen_ports_facts - removed leftover ``EnvironmentError`` . The ``else`` clause had a wrong indentation. The check is now handled in the ``split_pid_name`` function (https://github.com/ansible-collections/community.general/pull/5202). -- locale_gen - fix support for Ubuntu (https://github.com/ansible-collections/community.general/issues/5281). -- lxc_container - the module has been updated to support Python 3 (https://github.com/ansible-collections/community.general/pull/5304). -- lxd connection plugin - fix incorrect ``inventory_hostname`` in ``remote_addr``. This is needed for compatibility with ansible-core 2.13 (https://github.com/ansible-collections/community.general/issues/4886). 
-- manageiq_alert_profiles - avoid crash when reporting unknown profile caused by trying to return an undefined variable (https://github.com/ansible-collections/community.general/pull/5111). -- nmcli - avoid changed status for most cases with VPN connections (https://github.com/ansible-collections/community.general/pull/5126). -- nmcli - fix error caused by adding undefined module arguments for list options (https://github.com/ansible-collections/community.general/issues/4373, https://github.com/ansible-collections/community.general/pull/4813). -- nmcli - fix error when setting previously unset MAC address, ``gsm.apn`` or ``vpn.data``: current values were being normalized without checking if they might be ``None`` (https://github.com/ansible-collections/community.general/pull/5291). -- nmcli - fix int options idempotence (https://github.com/ansible-collections/community.general/issues/4998). -- nsupdate - compatibility with NS records (https://github.com/ansible-collections/community.general/pull/5112). -- nsupdate - fix silent failures when updating ``NS`` entries from Bind9 managed DNS zones (https://github.com/ansible-collections/community.general/issues/4657). -- opentelemetry callback plugin - support opentelemetry-api 1.13.0 that removed support for ``_time_ns`` (https://github.com/ansible-collections/community.general/pull/5342). -- osx_defaults - no longer expand ``~`` in ``value`` to the user's home directory, or expand environment variables (https://github.com/ansible-collections/community.general/issues/5234, https://github.com/ansible-collections/community.general/pull/5243). -- packet_ip_subnet - fix error reporting in case of invalid CIDR prefix lengths (https://github.com/ansible-collections/community.general/pull/5111). -- pacman - fixed name resolution of URL packages (https://github.com/ansible-collections/community.general/pull/4959). 
-- passwordstore lookup plugin - fix ``returnall`` for gopass (https://github.com/ansible-collections/community.general/pull/5027). -- passwordstore lookup plugin - fix password store path detection for gopass (https://github.com/ansible-collections/community.general/pull/4955). -- pfexec become plugin - remove superflous quotes preventing exe wrap from working as expected (https://github.com/ansible-collections/community.general/issues/3671, https://github.com/ansible-collections/community.general/pull/3889). -- pip_package_info - remove usage of global variable (https://github.com/ansible-collections/community.general/pull/5111). -- pkgng - fix case when ``pkg`` fails when trying to upgrade all packages (https://github.com/ansible-collections/community.general/issues/5363). -- proxmox - fix error handling when getting VM by name when ``state=absent`` (https://github.com/ansible-collections/community.general/pull/4945). -- proxmox inventory plugin - fix crash when ``enabled=1`` is used in agent config string (https://github.com/ansible-collections/community.general/pull/4910). -- proxmox inventory plugin - fixed extended status detection for qemu (https://github.com/ansible-collections/community.general/pull/4816). -- proxmox_kvm - fix ``agent`` parameter when boolean value is specified (https://github.com/ansible-collections/community.general/pull/5198). -- proxmox_kvm - fix error handling when getting VM by name when ``state=absent`` (https://github.com/ansible-collections/community.general/pull/4945). -- proxmox_kvm - fix exception when no ``agent`` argument is specified (https://github.com/ansible-collections/community.general/pull/5194). -- proxmox_kvm - fix wrong condition (https://github.com/ansible-collections/community.general/pull/5108). -- proxmox_kvm - replace new condition with proper condition to allow for using ``vmid`` on update (https://github.com/ansible-collections/community.general/pull/5206). 
-- rax_clb_nodes - fix code to be compatible with Python 3 (https://github.com/ansible-collections/community.general/pull/4933). -- redfish_command - fix the check if a virtual media is unmounted to just check for ``instered= false`` caused by Supermicro hardware that does not clear the ``ImageName`` (https://github.com/ansible-collections/community.general/pull/4839). -- redfish_command - the Supermicro Redfish implementation only supports the ``image_url`` parameter in the underlying API calls to ``VirtualMediaInsert`` and ``VirtualMediaEject``. Any values set (or the defaults) for ``write_protected`` or ``inserted`` will be ignored (https://github.com/ansible-collections/community.general/pull/4839). -- redfish_info - fix to ``GetChassisPower`` to correctly report power information when multiple chassis exist, but not all chassis report power information (https://github.com/ansible-collections/community.general/issues/4901). -- redfish_utils module utils - centralize payload checking when performing modification requests to a Redfish service (https://github.com/ansible-collections/community.general/issues/5210/). -- redhat_subscription - fix unsubscribing on RHEL 9 (https://github.com/ansible-collections/community.general/issues/4741). -- redhat_subscription - make module idempotent when ``pool_ids`` are used (https://github.com/ansible-collections/community.general/issues/5313). -- redis* modules - fix call to ``module.fail_json`` when failing because of missing Python libraries (https://github.com/ansible-collections/community.general/pull/4733). -- slack - fix incorrect channel prefix ``#`` caused by incomplete pattern detection by adding ``G0`` and ``GF`` as channel ID patterns (https://github.com/ansible-collections/community.general/pull/5019). -- slack - fix message update for channels which start with ``CP``. 
When ``message-id`` was passed it failed for channels which started with ``CP`` because the ``#`` symbol was added before the ``channel_id`` (https://github.com/ansible-collections/community.general/pull/5249). -- snap - allow values in the ``options`` parameter to contain whitespaces (https://github.com/ansible-collections/community.general/pull/5475). -- sudoers - ensure sudoers config files are created with the permissions requested by sudoers (0440) (https://github.com/ansible-collections/community.general/pull/4814). -- sudoers - fix incorrect handling of ``state: absent`` (https://github.com/ansible-collections/community.general/issues/4852). -- tss lookup plugin - adding support for updated Delinea library (https://github.com/DelineaXPM/python-tss-sdk/issues/9, https://github.com/ansible-collections/community.general/pull/5151). -- virtualbox inventory plugin - skip parsing values with keys that have both a value and nested data. Skip parsing values that are nested more than two keys deep (https://github.com/ansible-collections/community.general/issues/5332, https://github.com/ansible-collections/community.general/pull/5348). -- xcc_redfish_command - for compatibility due to Redfish spec changes the virtualMedia resource location changed from Manager to System (https://github.com/ansible-collections/community.general/pull/4682). -- xenserver_facts - fix broken ``AnsibleModule`` call that prevented the module from working at all (https://github.com/ansible-collections/community.general/pull/5383). -- xfconf - fix setting of boolean values (https://github.com/ansible-collections/community.general/issues/4999, https://github.com/ansible-collections/community.general/pull/5007). -- zfs - fix wrong quoting of properties (https://github.com/ansible-collections/community.general/issues/4707, https://github.com/ansible-collections/community.general/pull/4726). 
- -New Plugins ------------ - -Filter -~~~~~~ - -- counter - Counts hashable elements in a sequence - -Lookup -~~~~~~ - -- bitwarden - Retrieve secrets from Bitwarden - -New Modules ------------ - -- gconftool2_info - Retrieve GConf configurations -- iso_customize - Add/remove/change files in ISO file -- keycloak_user_rolemapping - Allows administration of Keycloak user_rolemapping with the Keycloak API -- keyring - Set or delete a passphrase using the Operating System's native keyring -- keyring_info - Get a passphrase using the Operating System's native keyring -- manageiq_policies_info - Listing of resource policy_profiles in ManageIQ -- manageiq_tags_info - Retrieve resource tags in ManageIQ -- pipx_info - Rretrieves information about applications installed with pipx -- proxmox_disk - Management of a disk of a Qemu(KVM) VM in a Proxmox VE cluster. -- scaleway_compute_private_network - Scaleway compute - private network management -- scaleway_container - Scaleway Container management -- scaleway_container_info - Retrieve information on Scaleway Container -- scaleway_container_namespace - Scaleway Container namespace management -- scaleway_container_namespace_info - Retrieve information on Scaleway Container namespace -- scaleway_container_registry - Scaleway Container registry management module -- scaleway_container_registry_info - Scaleway Container registry info module -- scaleway_function - Scaleway Function management -- scaleway_function_info - Retrieve information on Scaleway Function -- scaleway_function_namespace - Scaleway Function namespace management -- scaleway_function_namespace_info - Retrieve information on Scaleway Function namespace -- wdc_redfish_command - Manages WDC UltraStar Data102 Out-Of-Band controllers using Redfish APIs -- wdc_redfish_info - Manages WDC UltraStar Data102 Out-Of-Band controllers using Redfish APIs +This file is a placeholder; a version-specific ``CHANGELOG-vX.rst`` will be generated during releases from fragments +under 
``changelogs/fragments``. On release branches, once a release has been created, consult the branch's version-specific +file for changes that have occurred in that branch. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 358daa5e91..94c5299069 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -31,7 +31,9 @@ Also, consider taking up a valuable, reviewed, but abandoned pull request which * Try committing your changes with an informative but short commit message. * Do not squash your commits and force-push to your branch if not needed. Reviews of your pull request are much easier with individual commits to comprehend the pull request history. All commits of your pull request branch will be squashed into one commit by GitHub upon merge. * Do not add merge commits to your PR. The bot will complain and you will have to rebase ([instructions for rebasing](https://docs.ansible.com/ansible/latest/dev_guide/developing_rebasing.html)) to remove them before your PR can be merged. To avoid that git automatically does merges during pulls, you can configure it to do rebases instead by running `git config pull.rebase true` inside the repository checkout. -* Make sure your PR includes a [changelog fragment](https://docs.ansible.com/ansible/devel/community/development_process.html#creating-changelog-fragments). (You must not include a fragment for new modules or new plugins, except for test and filter plugins. Also you shouldn't include one for docs-only changes. If you're not sure, simply don't include one, we'll tell you whether one is needed or not :) ) +* Make sure your PR includes a [changelog fragment](https://docs.ansible.com/ansible/devel/community/collection_development_process.html#creating-a-changelog-fragment). + * You must not include a fragment for new modules or new plugins. Also you shouldn't include one for docs-only changes. 
(If you're not sure, simply don't include one, we'll tell you whether one is needed or not :) ) + * Please always include a link to the pull request itself, and if the PR is about an issue, also a link to the issue. Also make sure the fragment ends with a period, and begins with a lower-case letter after `-`. (Again, if you don't do this, we'll add suggestions to fix it, so don't worry too much :) ) + * Avoid reformatting unrelated parts of the codebase in your PR. These types of changes will likely be requested for reversion, create additional work for reviewers, and may cause approval to be delayed. You can also read [our Quick-start development guide](https://github.com/ansible/community-docs/blob/main/create_pr_quick_start_guide.rst). @@ -42,7 +44,49 @@ If you want to test a PR locally, refer to [our testing guide](https://github.co If you find any inconsistencies or places in this document which can be improved, feel free to raise an issue or pull request to fix it. -## Run sanity, unit or integration tests locally +## Run sanity or unit tests locally (with antsibull-nox) + +The easiest way to run sanity and unit tests locally is to use [antsibull-nox](https://ansible.readthedocs.io/projects/antsibull-nox/). +(If you have [nox](https://nox.thea.codes/en/stable/) installed, it will automatically install antsibull-nox in a virtual environment for you.) + +### Sanity tests + +The following commands show how to run ansible-test sanity tests: + +```.bash +# Run basic sanity tests for all files in the collection: +nox -Re ansible-test-sanity-devel + +# Run basic sanity tests for the given files and directories: +nox -Re ansible-test-sanity-devel -- plugins/modules/system/pids.py tests/integration/targets/pids/ + +# Run all other sanity tests for all files in the collection: +nox -R +``` + +If you replace `-Re` with `-e`, or leave `-R` away, then the virtual environments will be re-created. The `-R` re-uses them (if they already exist). 
+ +### Unit tests + +The following commands show how to run unit tests: + +```.bash +# Run all unit tests: +nox -Re ansible-test-units-devel + +# Run all unit tests for one Python version (a lot faster): +nox -Re ansible-test-units-devel -- --python 3.13 + +# Run a specific unit test (for the nmcli module) for one Python version: +nox -Re ansible-test-units-devel -- --python 3.13 tests/unit/plugins/modules/net_tools/test_nmcli.py +``` + +If you replace `-Re` with `-e`, then the virtual environments will be re-created. The `-R` re-uses them (if they already exist). + +## Run basic sanity, unit or integration tests locally (with ansible-test) + +Instead of using antsibull-nox, you can also run sanity and unit tests with ansible-test directly. +This also allows you to run integration tests. You have to check out the repository into a specific path structure to be able to run `ansible-test`. The path to the git checkout must end with `.../ansible_collections/community/general`. Please see [our testing guide](https://github.com/ansible/community-docs/blob/main/test_pr_locally_guide.rst) for instructions on how to check out the repository into a correct path structure. The short version of these instructions is: @@ -54,16 +98,27 @@ cd ~/dev/ansible_collections/community/general Then you can run `ansible-test` (which is a part of [ansible-core](https://pypi.org/project/ansible-core/)) inside the checkout. The following example commands expect that you have installed Docker or Podman. Note that Podman has only been supported by more recent ansible-core releases. If you are using Docker, the following will work with Ansible 2.9+. 
-The following commands show how to run sanity tests: +### Basic sanity tests + +The following commands show how to run basic sanity tests: ```.bash -# Run sanity tests for all files in the collection: +# Run basic sanity tests for all files in the collection: ansible-test sanity --docker -v -# Run sanity tests for the given files and directories: +# Run basic sanity tests for the given files and directories: ansible-test sanity --docker -v plugins/modules/system/pids.py tests/integration/targets/pids/ ``` +### Unit tests + +Note that for running unit tests, you need to install required collections in the same folder structure that `community.general` is checked out in. +Right now, you need to install [`community.internal_test_tools`](https://github.com/ansible-collections/community.internal_test_tools). +If you want to use the latest version from GitHub, you can run: +``` +git clone https://github.com/ansible-collections/community.internal_test_tools.git ~/dev/ansible_collections/community/internal_test_tools +``` + The following commands show how to run unit tests: ```.bash @@ -77,13 +132,42 @@ ansible-test units --docker -v --python 3.8 ansible-test units --docker -v --python 3.8 tests/unit/plugins/modules/net_tools/test_nmcli.py ``` +### Integration tests + +Note that for running integration tests, you need to install required collections in the same folder structure that `community.general` is checked out in. 
+Right now, depending on the test, you need to install [`ansible.posix`](https://github.com/ansible-collections/ansible.posix), [`community.crypto`](https://github.com/ansible-collections/community.crypto), and [`community.docker`](https://github.com/ansible-collections/community.docker). +If you want to use the latest versions from GitHub, you can run: +``` +mkdir -p ~/dev/ansible_collections/ansible +git clone https://github.com/ansible-collections/ansible.posix.git ~/dev/ansible_collections/ansible/posix +git clone https://github.com/ansible-collections/community.crypto.git ~/dev/ansible_collections/community/crypto +git clone https://github.com/ansible-collections/community.docker.git ~/dev/ansible_collections/community/docker +``` + The following commands show how to run integration tests: -```.bash -# Run integration tests for the interfaces_files module in a Docker container using the -# fedora35 operating system image (the supported images depend on your ansible-core version): -ansible-test integration --docker fedora35 -v interfaces_file +#### In Docker +Integration tests on Docker have the following parameters: +- `image_name` (required): The name of the Docker image. To get the list of supported Docker images, run + `ansible-test integration --help` and look for _target docker images_. +- `test_name` (optional): The name of the integration test. + For modules, this equals the short name of the module; for example, `pacman` in case of `community.general.pacman`. + For plugins, the plugin type is added before the plugin's short name, for example `callback_yaml` for the `community.general.yaml` callback. 
+```.bash +# Test all plugins/modules on fedora40 +ansible-test integration -v --docker fedora40 + +# Template +ansible-test integration -v --docker image_name test_name + +# Example community.general.ini_file module on fedora40 Docker image: +ansible-test integration -v --docker fedora40 ini_file +``` + +#### Without isolation + +```.bash # Run integration tests for the flattened lookup **without any isolation**: ansible-test integration -v lookup_flattened ``` @@ -121,19 +205,3 @@ Creating new modules and plugins requires a bit more work than other Pull Reques listed as `maintainers` will be pinged for new issues and PRs that modify the module/plugin or its tests. When you add a new plugin/module, we expect that you perform maintainer duty for at least some time after contributing it. - -## pre-commit - -To help ensure high-quality contributions this repository includes a [pre-commit](https://pre-commit.com) configuration which -corrects and tests against common issues that would otherwise cause CI to fail. To begin using these pre-commit hooks see -the [Installation](#installation) section below. - -This is optional and not required to contribute to this repository. - -### Installation - -Follow the [instructions](https://pre-commit.com/#install) provided with pre-commit and run `pre-commit install` under the repository base. If for any reason you would like to disable the pre-commit hooks run `pre-commit uninstall`. - -This is optional to run it locally. - -You can trigger it locally with `pre-commit run --all-files` or even to run only for a given file `pre-commit run --files YOUR_FILE`. 
diff --git a/README.md b/README.md index 77888bdb7b..726d9cb872 100644 --- a/README.md +++ b/README.md @@ -6,8 +6,12 @@ SPDX-License-Identifier: GPL-3.0-or-later # Community General Collection -[![Build Status](https://dev.azure.com/ansible/community.general/_apis/build/status/CI?branchName=stable-6)](https://dev.azure.com/ansible/community.general/_build?definitionId=31) +[![Documentation](https://img.shields.io/badge/docs-brightgreen.svg)](https://docs.ansible.com/ansible/devel/collections/community/general/) +[![Build Status](https://dev.azure.com/ansible/community.general/_apis/build/status/CI?branchName=main)](https://dev.azure.com/ansible/community.general/_build?definitionId=31) +[![EOL CI](https://github.com/ansible-collections/community.general/actions/workflows/ansible-test.yml/badge.svg?branch=main)](https://github.com/ansible-collections/community.general/actions) +[![Nox CI](https://github.com/ansible-collections/community.general/actions/workflows/nox.yml/badge.svg?branch=main)](https://github.com/ansible-collections/community.general/actions) [![Codecov](https://img.shields.io/codecov/c/github/ansible-collections/community.general)](https://codecov.io/gh/ansible-collections/community.general) +[![REUSE status](https://api.reuse.software/badge/github.com/ansible-collections/community.general)](https://api.reuse.software/info/github.com/ansible-collections/community.general) This repository contains the `community.general` Ansible Collection. The collection is a part of the Ansible package and includes many modules and plugins supported by Ansible community which are not part of more specialized community collections. 
@@ -21,11 +25,21 @@ We follow [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/comm If you encounter abusive behavior violating the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html), please refer to the [policy violations](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html#policy-violations) section of the Code of Conduct for information on how to raise a complaint. +## Communication + +* Join the Ansible forum: + * [Get Help](https://forum.ansible.com/c/help/6): get help or help others. This is for questions about modules or plugins in the collection. Please add appropriate tags if you start new discussions. + * [Tag `community-general`](https://forum.ansible.com/tag/community-general): discuss the *collection itself*, instead of specific modules or plugins. + * [Social Spaces](https://forum.ansible.com/c/chat/4): gather and interact with fellow enthusiasts. + * [News & Announcements](https://forum.ansible.com/c/news/5): track project-wide announcements including social events. + +* The Ansible [Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn): used to announce releases and important changes. + +For more information about communication, see the [Ansible communication guide](https://docs.ansible.com/ansible/devel/community/communication.html). + ## Tested with Ansible -Tested with the current ansible-core 2.11, ansible-core 2.12, ansible-core 2.13, ansible-core 2.14 releases and the current development version of ansible-core. Ansible-core versions before 2.11.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases. - -Parts of this collection will not work with ansible-core 2.11 on Python 3.12+. +Tested with the current ansible-core 2.17, ansible-core 2.18, ansible-core 2.19, ansible-core 2.20 releases and the current development version of ansible-core. 
Ansible-core versions before 2.17.0 are not supported. This includes all ansible-base 2.10 and Ansible 2.9 releases. ## External requirements @@ -33,13 +47,13 @@ Some modules and plugins require external libraries. Please check the requiremen ## Included content -Please check the included content on the [Ansible Galaxy page for this collection](https://galaxy.ansible.com/community/general) or the [documentation on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/). +Please check the included content on the [Ansible Galaxy page for this collection](https://galaxy.ansible.com/ui/repo/published/community/general/) or the [documentation on the Ansible docs site](https://docs.ansible.com/ansible/latest/collections/community/general/). ## Using this collection This collection is shipped with the Ansible package. So if you have it installed, no more action is required. -If you have a minimal installation (only Ansible Core installed) or you want to use the latest version of the collection along with the whole Ansible package, you need to install the collection from [Ansible Galaxy](https://galaxy.ansible.com/community/general) manually with the `ansible-galaxy` command-line tool: +If you have a minimal installation (only Ansible Core installed) or you want to use the latest version of the collection along with the whole Ansible package, you need to install the collection from [Ansible Galaxy](https://galaxy.ansible.com/ui/repo/published/community/general/) manually with the `ansible-galaxy` command-line tool: ansible-galaxy collection install community.general @@ -56,7 +70,7 @@ Note that if you install the collection manually, it will not be upgraded automa ansible-galaxy collection install community.general --upgrade ``` -You can also install a specific version of the collection, for example, if you need to downgrade when something is broken in the latest version (please report an issue in this repository). 
Use the following syntax where `X.Y.Z` can be any [available version](https://galaxy.ansible.com/community/general): +You can also install a specific version of the collection, for example, if you need to downgrade when something is broken in the latest version (please report an issue in this repository). Use the following syntax where `X.Y.Z` can be any [available version](https://galaxy.ansible.com/ui/repo/published/community/general/): ```bash ansible-galaxy collection install community.general:==X.Y.Z @@ -72,13 +86,13 @@ We are actively accepting new contributors. All types of contributions are very welcome. -You don't know how to start? Refer to our [contribution guide](https://github.com/ansible-collections/community.general/blob/stable-6/CONTRIBUTING.md)! +You don't know how to start? Refer to our [contribution guide](https://github.com/ansible-collections/community.general/blob/main/CONTRIBUTING.md)! -The current maintainers are listed in the [commit-rights.md](https://github.com/ansible-collections/community.general/blob/stable-6/commit-rights.md#people) file. If you have questions or need help, feel free to mention them in the proposals. +The current maintainers are listed in the [commit-rights.md](https://github.com/ansible-collections/community.general/blob/main/commit-rights.md#people) file. If you have questions or need help, feel free to mention them in the proposals. You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html). -Also for some notes specific to this collection see [our CONTRIBUTING documentation](https://github.com/ansible-collections/community.general/blob/stable-6/CONTRIBUTING.md). 
+Also for some notes specific to this collection see [our CONTRIBUTING documentation](https://github.com/ansible-collections/community.general/blob/main/CONTRIBUTING.md). ### Running tests @@ -88,7 +102,7 @@ See [here](https://docs.ansible.com/ansible/devel/dev_guide/developing_collectio To learn how to maintain / become a maintainer of this collection, refer to: -* [Committer guidelines](https://github.com/ansible-collections/community.general/blob/stable-6/commit-rights.md). +* [Committer guidelines](https://github.com/ansible-collections/community.general/blob/main/commit-rights.md). * [Maintainer guidelines](https://github.com/ansible/community-docs/blob/main/maintaining.rst). It is necessary for maintainers of this collection to be subscribed to: @@ -98,25 +112,13 @@ It is necessary for maintainers of this collection to be subscribed to: They also should be subscribed to Ansible's [The Bullhorn newsletter](https://docs.ansible.com/ansible/devel/community/communication.html#the-bullhorn). -## Communication - -We announce important development changes and releases through Ansible's [The Bullhorn newsletter](https://eepurl.com/gZmiEP). If you are a collection developer, be sure you are subscribed. - -Join us in the `#ansible` (general use questions and support), `#ansible-community` (community and collection development questions), and other [IRC channels](https://docs.ansible.com/ansible/devel/community/communication.html#irc-channels) on [Libera.chat](https://libera.chat). - -We take part in the global quarterly [Ansible Contributor Summit](https://github.com/ansible/community/wiki/Contributor-Summit) virtually or in-person. Track [The Bullhorn newsletter](https://eepurl.com/gZmiEP) and join us. - -For more information about communities, meetings and agendas see [Community Wiki](https://github.com/ansible/community/wiki/Community). 
- -For more information about communication, refer to Ansible's the [Communication guide](https://docs.ansible.com/ansible/devel/community/communication.html). - ## Publishing New Version See the [Releasing guidelines](https://github.com/ansible/community-docs/blob/main/releasing_collections.rst) to learn how to release this collection. ## Release notes -See the [changelog](https://github.com/ansible-collections/community.general/blob/stable-6/CHANGELOG.rst). +See the [changelog](https://github.com/ansible-collections/community.general/blob/main/CHANGELOG.md). ## Roadmap @@ -135,8 +137,8 @@ See [this issue](https://github.com/ansible-collections/community.general/issues This collection is primarily licensed and distributed as a whole under the GNU General Public License v3.0 or later. -See [LICENSES/GPL-3.0-or-later.txt](https://github.com/ansible-collections/community.general/blob/stable-6/COPYING) for the full text. +See [LICENSES/GPL-3.0-or-later.txt](https://github.com/ansible-collections/community.general/blob/main/COPYING) for the full text. -Parts of the collection are licensed under the [BSD 2-Clause license](https://github.com/ansible-collections/community.general/blob/stable-6/LICENSES/BSD-2-Clause.txt), the [MIT license](https://github.com/ansible-collections/community.general/blob/stable-6/LICENSES/MIT.txt), and the [PSF 2.0 license](https://github.com/ansible-collections/community.general/blob/stable-6/LICENSES/PSF-2.0.txt). +Parts of the collection are licensed under the [BSD 2-Clause license](https://github.com/ansible-collections/community.general/blob/main/LICENSES/BSD-2-Clause.txt), the [MIT license](https://github.com/ansible-collections/community.general/blob/main/LICENSES/MIT.txt), and the [PSF 2.0 license](https://github.com/ansible-collections/community.general/blob/main/LICENSES/PSF-2.0.txt). 
-All files have a machine readable `SDPX-License-Identifier:` comment denoting its respective license(s) or an equivalent entry in an accompanying `.license` file. Only changelog fragments (which will not be part of a release) are covered by a blanket statement in `.reuse/dep5`. This conforms to the [REUSE specification](https://reuse.software/spec/). +All files have a machine readable `SPDX-License-Identifier:` comment denoting its respective license(s) or an equivalent entry in an accompanying `.license` file. Only changelog fragments (which will not be part of a release) are covered by a blanket statement in `REUSE.toml`. This conforms to the [REUSE specification](https://reuse.software/spec/). diff --git a/tests/integration/targets/setup_postgresql_db/vars/default-py3.yml b/REUSE.toml similarity index 52% rename from tests/integration/targets/setup_postgresql_db/vars/default-py3.yml rename to REUSE.toml index 6f96043a39..ff95bb8217 100644 --- a/tests/integration/targets/setup_postgresql_db/vars/default-py3.yml +++ b/REUSE.toml @@ -1,11 +1,11 @@ ---- # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -postgresql_packages: - - "postgresql-server" - - "python3-psycopg2" +version = 1 -pg_hba_location: "/var/lib/pgsql/data/pg_hba.conf" -pg_dir: "/var/lib/pgsql/data" +[[annotations]] +path = "changelogs/fragments/**" +precedence = "aggregate" +SPDX-FileCopyrightText = "Ansible Project" +SPDX-License-Identifier = "GPL-3.0-or-later" diff --git a/antsibull-nox.toml b/antsibull-nox.toml new file mode 100644 index 0000000000..735d572599 --- /dev/null +++ b/antsibull-nox.toml @@ -0,0 +1,99 @@ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +# SPDX-FileCopyrightText: 2025 Felix Fontein + +[collection_sources] +"ansible.posix" 
= "git+https://github.com/ansible-collections/ansible.posix.git,main" +"community.crypto" = "git+https://github.com/ansible-collections/community.crypto.git,main" +"community.docker" = "git+https://github.com/ansible-collections/community.docker.git,main" +"community.internal_test_tools" = "git+https://github.com/ansible-collections/community.internal_test_tools.git,main" + +[collection_sources_per_ansible.'2.16'] +# community.crypto's main branch needs ansible-core >= 2.17 +"community.crypto" = "git+https://github.com/ansible-collections/community.crypto.git,stable-2" + +[vcs] +vcs = "git" +development_branch = "main" +stable_branches = [ "stable-*" ] + +[sessions] + +[sessions.lint] +run_isort = false +run_black = false +run_flake8 = false +run_pylint = false +run_yamllint = true +yamllint_config = ".yamllint" +# yamllint_config_plugins = ".yamllint-docs" +# yamllint_config_plugins_examples = ".yamllint-examples" +run_mypy = false + +[sessions.docs_check] +validate_collection_refs="all" +codeblocks_restrict_types = [ + "ansible-output", + "console", + "ini", + "json", + "python", + "shell", + "yaml", + "yaml+jinja", + "text", +] +codeblocks_restrict_type_exact_case = true +codeblocks_allow_without_type = false +codeblocks_allow_literal_blocks = false + +[sessions.license_check] + +[sessions.extra_checks] +run_no_unwanted_files = true +no_unwanted_files_module_extensions = [".py"] +no_unwanted_files_yaml_extensions = [".yml"] +run_action_groups = true +run_no_trailing_whitespace = true +no_trailing_whitespace_skip_paths = [ + "tests/integration/targets/iso_extract/files/test.iso", + "tests/integration/targets/java_cert/files/testpkcs.p12", + "tests/integration/targets/one_host/files/testhost/tmp/opennebula-fixtures.json.gz", + "tests/integration/targets/one_template/files/testhost/tmp/opennebula-fixtures.json.gz", + "tests/integration/targets/setup_flatpak_remote/files/repo.tar.xz", +] +no_trailing_whitespace_skip_directories = [ + 
"tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/golden_output/", + "tests/unit/plugins/modules/interfaces_file/interfaces_file_fixtures/input/", +] + +[[sessions.extra_checks.action_groups_config]] +name = "consul" +pattern = "^consul_.*$" +exclusions = [ + "consul_acl_bootstrap", + "consul_kv", +] +doc_fragment = "community.general.consul.actiongroup_consul" + +[[sessions.extra_checks.action_groups_config]] +name = "keycloak" +pattern = "^keycloak_.*$" +exclusions = [ + "keycloak_realm_info", +] +doc_fragment = "community.general.keycloak.actiongroup_keycloak" + +[[sessions.extra_checks.action_groups_config]] +name = "scaleway" +pattern = "^scaleway_.*$" +doc_fragment = "community.general.scaleway.actiongroup_scaleway" + +[sessions.build_import_check] +run_galaxy_importer = true + +[sessions.ansible_test_sanity] +include_devel = true + +[sessions.ansible_test_units] +include_devel = true diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml index e5d5ec8120..f8129d5d73 100644 --- a/changelogs/changelog.yaml +++ b/changelogs/changelog.yaml @@ -1,1026 +1,3 @@ -ancestor: 5.0.0 -releases: - 6.0.0: - changes: - breaking_changes: - - scaleway_container_registry_info - no longer replace ``secret_environment_variables`` - in the output by ``SENSITIVE_VALUE`` (https://github.com/ansible-collections/community.general/pull/5497). - bugfixes: - - iso_create - the module somtimes failed to add folders for Joliet and UDF - formats (https://github.com/ansible-collections/community.general/issues/5275). - - ldap_attrs - fix bug which caused a ``Bad search filter`` error. The error - was occuring when the ldap attribute value contained special characters such - as ``(`` or ``*`` (https://github.com/ansible-collections/community.general/issues/5434, - https://github.com/ansible-collections/community.general/pull/5435). 
- - snap - allow values in the ``options`` parameter to contain whitespaces (https://github.com/ansible-collections/community.general/pull/5475). - minor_changes: - - ansible_galaxy_install - refactored module to use ``CmdRunner`` to execute - ``ansible-galaxy`` (https://github.com/ansible-collections/community.general/pull/5477). - - cpanm - refactored module to use ``CmdRunner`` to execute ``cpanm`` (https://github.com/ansible-collections/community.general/pull/5485). - - hponcfg - refactored module to use ``CmdRunner`` to execute ``hponcfg`` (https://github.com/ansible-collections/community.general/pull/5483). - - ldap_attrs - allow for DNs to have ``{x}`` prefix on first RDN (https://github.com/ansible-collections/community.general/issues/977, - https://github.com/ansible-collections/community.general/pull/5450). - - mksysb - refactored module to use ``CmdRunner`` to execute ``mksysb`` (https://github.com/ansible-collections/community.general/pull/5484). - - onepassword - support version 2 of the OnePassword CLI (https://github.com/ansible-collections/community.general/pull/4728) - release_summary: New major release of community.general with lots of bugfixes, - new features, some removed deprecated features, and some other breaking changes. - Please check the coresponding sections of the changelog for more details. 
- fragments: - - 4728-onepassword-v2.yml - - 5435-escape-ldap-param.yml - - 5450-allow-for-xordered-dns.yaml - - 5468-iso-create-not-add-folders.yml - - 5475-snap-option-value-whitespace.yml - - 5477-ansible-galaxy-install-cmd-runner.yml - - 5483-hponcfg-cmd-runner.yml - - 5484-mksysb-cmd-runner.yml - - 5485-cpanm-cmd-runner.yml - - 5497-scaleway-filtering.yml - - 6.0.0.yml - modules: - - description: Scaleway Container management - name: scaleway_container - namespace: '' - - description: Retrieve information on Scaleway Container - name: scaleway_container_info - namespace: '' - - description: Scaleway Container namespace management - name: scaleway_container_namespace - namespace: '' - - description: Retrieve information on Scaleway Container namespace - name: scaleway_container_namespace_info - namespace: '' - - description: Scaleway Function management - name: scaleway_function - namespace: '' - - description: Retrieve information on Scaleway Function - name: scaleway_function_info - namespace: '' - release_date: '2022-11-07' - 6.0.0-a1: - changes: - breaking_changes: - - newrelic_deployment - ``revision`` is required for v2 API (https://github.com/ansible-collections/community.general/pull/5341). - bugfixes: - - Include ``PSF-license.txt`` file for ``plugins/module_utils/_mount.py``. - - Include ``simplified_bsd.txt`` license file for various module utils, the - ``lxca_common`` docs fragment, and the ``utm_utils`` unit tests. - - alternatives - do not set the priority if the priority was not set by the - user (https://github.com/ansible-collections/community.general/pull/4810). - - alternatives - only pass subcommands when they are specified as module arguments - (https://github.com/ansible-collections/community.general/issues/4803, https://github.com/ansible-collections/community.general/issues/4804, - https://github.com/ansible-collections/community.general/pull/4836). 
- - alternatives - when ``subcommands`` is specified, ``link`` must be given for - every subcommand. This was already mentioned in the documentation, but not - enforced by the code (https://github.com/ansible-collections/community.general/pull/4836). - - apache2_mod_proxy - avoid crash when reporting inability to parse balancer_member_page - HTML caused by using an undefined variable in the error message (https://github.com/ansible-collections/community.general/pull/5111). - - archive - avoid crash when ``lzma`` is not present and ``format`` is not ``xz`` - (https://github.com/ansible-collections/community.general/pull/5393). - - cmd_runner module utils - fix bug caused by using the ``command`` variable - instead of ``self.command`` when looking for binary path (https://github.com/ansible-collections/community.general/pull/4903). - - consul - fixed bug introduced in PR 4590 (https://github.com/ansible-collections/community.general/issues/4680). - - credstash lookup plugin - pass plugin options to credstash for all terms, - not just for the first (https://github.com/ansible-collections/community.general/pull/5440). - - dig lookup plugin - add option to return empty result without empty strings, - and return empty list instead of ``NXDOMAIN`` (https://github.com/ansible-collections/community.general/pull/5439, - https://github.com/ansible-collections/community.general/issues/5428). - - dig lookup plugin - fix evaluation of falsy values for boolean parameters - ``fail_on_error`` and ``retry_servfail`` (https://github.com/ansible-collections/community.general/pull/5129). - - dnsimple_info - correctly report missing library as ``requests`` and not ``another_library`` - (https://github.com/ansible-collections/community.general/pull/5111). 
- - dnstxt lookup plugin - add option to return empty result without empty strings, - and return empty list instead of ``NXDOMAIN`` (https://github.com/ansible-collections/community.general/pull/5457, - https://github.com/ansible-collections/community.general/issues/5428). - - dsv lookup plugin - do not ignore the ``tld`` parameter (https://github.com/ansible-collections/community.general/pull/4911). - - filesystem - handle ``fatresize --info`` output lines without ``:`` (https://github.com/ansible-collections/community.general/pull/4700). - - filesystem - improve error messages when output cannot be parsed by including - newlines in escaped form (https://github.com/ansible-collections/community.general/pull/4700). - - funcd connection plugin - fix signature of ``exec_command`` (https://github.com/ansible-collections/community.general/pull/5111). - - ini_file - minor refactor fixing a python lint error (https://github.com/ansible-collections/community.general/pull/5307). - - keycloak_realm - fix default groups and roles (https://github.com/ansible-collections/community.general/issues/4241). - - keyring_info - fix the result from the keyring library never getting returned - (https://github.com/ansible-collections/community.general/pull/4964). - - ldap_attrs - fix ordering issue by ignoring the ``{x}`` prefix on attribute - values (https://github.com/ansible-collections/community.general/issues/977, - https://github.com/ansible-collections/community.general/pull/5385). - - listen_ports_facts - removed leftover ``EnvironmentError`` . The ``else`` - clause had a wrong indentation. The check is now handled in the ``split_pid_name`` - function (https://github.com/ansible-collections/community.general/pull/5202). - - locale_gen - fix support for Ubuntu (https://github.com/ansible-collections/community.general/issues/5281). - - lxc_container - the module has been updated to support Python 3 (https://github.com/ansible-collections/community.general/pull/5304). 
- - lxd connection plugin - fix incorrect ``inventory_hostname`` in ``remote_addr``. - This is needed for compatibility with ansible-core 2.13 (https://github.com/ansible-collections/community.general/issues/4886). - - manageiq_alert_profiles - avoid crash when reporting unknown profile caused - by trying to return an undefined variable (https://github.com/ansible-collections/community.general/pull/5111). - - nmcli - avoid changed status for most cases with VPN connections (https://github.com/ansible-collections/community.general/pull/5126). - - nmcli - fix error caused by adding undefined module arguments for list options - (https://github.com/ansible-collections/community.general/issues/4373, https://github.com/ansible-collections/community.general/pull/4813). - - 'nmcli - fix error when setting previously unset MAC address, ``gsm.apn`` - or ``vpn.data``: current values were being normalized without checking if - they might be ``None`` (https://github.com/ansible-collections/community.general/pull/5291).' - - nmcli - fix int options idempotence (https://github.com/ansible-collections/community.general/issues/4998). - - nsupdate - compatibility with NS records (https://github.com/ansible-collections/community.general/pull/5112). - - nsupdate - fix silent failures when updating ``NS`` entries from Bind9 managed - DNS zones (https://github.com/ansible-collections/community.general/issues/4657). - - opentelemetry callback plugin - support opentelemetry-api 1.13.0 that removed - support for ``_time_ns`` (https://github.com/ansible-collections/community.general/pull/5342). - - osx_defaults - no longer expand ``~`` in ``value`` to the user's home directory, - or expand environment variables (https://github.com/ansible-collections/community.general/issues/5234, - https://github.com/ansible-collections/community.general/pull/5243). 
- - packet_ip_subnet - fix error reporting in case of invalid CIDR prefix lengths - (https://github.com/ansible-collections/community.general/pull/5111). - - pacman - fixed name resolution of URL packages (https://github.com/ansible-collections/community.general/pull/4959). - - passwordstore lookup plugin - fix ``returnall`` for gopass (https://github.com/ansible-collections/community.general/pull/5027). - - passwordstore lookup plugin - fix password store path detection for gopass - (https://github.com/ansible-collections/community.general/pull/4955). - - pfexec become plugin - remove superflous quotes preventing exe wrap from working - as expected (https://github.com/ansible-collections/community.general/issues/3671, - https://github.com/ansible-collections/community.general/pull/3889). - - pip_package_info - remove usage of global variable (https://github.com/ansible-collections/community.general/pull/5111). - - pkgng - fix case when ``pkg`` fails when trying to upgrade all packages (https://github.com/ansible-collections/community.general/issues/5363). - - proxmox - fix error handling when getting VM by name when ``state=absent`` - (https://github.com/ansible-collections/community.general/pull/4945). - - proxmox inventory plugin - fix crash when ``enabled=1`` is used in agent config - string (https://github.com/ansible-collections/community.general/pull/4910). - - proxmox inventory plugin - fixed extended status detection for qemu (https://github.com/ansible-collections/community.general/pull/4816). - - proxmox_kvm - fix ``agent`` parameter when boolean value is specified (https://github.com/ansible-collections/community.general/pull/5198). - - proxmox_kvm - fix error handling when getting VM by name when ``state=absent`` - (https://github.com/ansible-collections/community.general/pull/4945). - - proxmox_kvm - fix exception when no ``agent`` argument is specified (https://github.com/ansible-collections/community.general/pull/5194). 
- - proxmox_kvm - fix wrong condition (https://github.com/ansible-collections/community.general/pull/5108). - - proxmox_kvm - replace new condition with proper condition to allow for using - ``vmid`` on update (https://github.com/ansible-collections/community.general/pull/5206). - - rax_clb_nodes - fix code to be compatible with Python 3 (https://github.com/ansible-collections/community.general/pull/4933). - - redfish_command - fix the check if a virtual media is unmounted to just check - for ``instered= false`` caused by Supermicro hardware that does not clear - the ``ImageName`` (https://github.com/ansible-collections/community.general/pull/4839). - - redfish_command - the Supermicro Redfish implementation only supports the - ``image_url`` parameter in the underlying API calls to ``VirtualMediaInsert`` - and ``VirtualMediaEject``. Any values set (or the defaults) for ``write_protected`` - or ``inserted`` will be ignored (https://github.com/ansible-collections/community.general/pull/4839). - - redfish_info - fix to ``GetChassisPower`` to correctly report power information - when multiple chassis exist, but not all chassis report power information - (https://github.com/ansible-collections/community.general/issues/4901). - - redfish_utils module utils - centralize payload checking when performing modification - requests to a Redfish service (https://github.com/ansible-collections/community.general/issues/5210/). - - redhat_subscription - fix unsubscribing on RHEL 9 (https://github.com/ansible-collections/community.general/issues/4741). - - redhat_subscription - make module idempotent when ``pool_ids`` are used (https://github.com/ansible-collections/community.general/issues/5313). - - redis* modules - fix call to ``module.fail_json`` when failing because of - missing Python libraries (https://github.com/ansible-collections/community.general/pull/4733). 
- - slack - fix incorrect channel prefix ``#`` caused by incomplete pattern detection - by adding ``G0`` and ``GF`` as channel ID patterns (https://github.com/ansible-collections/community.general/pull/5019). - - slack - fix message update for channels which start with ``CP``. When ``message-id`` - was passed it failed for channels which started with ``CP`` because the ``#`` - symbol was added before the ``channel_id`` (https://github.com/ansible-collections/community.general/pull/5249). - - sudoers - ensure sudoers config files are created with the permissions requested - by sudoers (0440) (https://github.com/ansible-collections/community.general/pull/4814). - - 'sudoers - fix incorrect handling of ``state: absent`` (https://github.com/ansible-collections/community.general/issues/4852).' - - tss lookup plugin - adding support for updated Delinea library (https://github.com/DelineaXPM/python-tss-sdk/issues/9, - https://github.com/ansible-collections/community.general/pull/5151). - - virtualbox inventory plugin - skip parsing values with keys that have both - a value and nested data. Skip parsing values that are nested more than two - keys deep (https://github.com/ansible-collections/community.general/issues/5332, - https://github.com/ansible-collections/community.general/pull/5348). - - xcc_redfish_command - for compatibility due to Redfish spec changes the virtualMedia - resource location changed from Manager to System (https://github.com/ansible-collections/community.general/pull/4682). - - xenserver_facts - fix broken ``AnsibleModule`` call that prevented the module - from working at all (https://github.com/ansible-collections/community.general/pull/5383). - - xfconf - fix setting of boolean values (https://github.com/ansible-collections/community.general/issues/4999, - https://github.com/ansible-collections/community.general/pull/5007). 
- - zfs - fix wrong quoting of properties (https://github.com/ansible-collections/community.general/issues/4707, - https://github.com/ansible-collections/community.general/pull/4726). - deprecated_features: - - ArgFormat module utils - deprecated along ``CmdMixin``, in favor of the ``cmd_runner_fmt`` - module util (https://github.com/ansible-collections/community.general/pull/5370). - - CmdMixin module utils - deprecated in favor of the ``CmdRunner`` module util - (https://github.com/ansible-collections/community.general/pull/5370). - - CmdModuleHelper module utils - deprecated in favor of the ``CmdRunner`` module - util (https://github.com/ansible-collections/community.general/pull/5370). - - CmdStateModuleHelper module utils - deprecated in favor of the ``CmdRunner`` - module util (https://github.com/ansible-collections/community.general/pull/5370). - - cmd_runner module utils - deprecated ``fmt`` in favour of ``cmd_runner_fmt`` - as the parameter format object (https://github.com/ansible-collections/community.general/pull/4777). - - django_manage - support for Django releases older than 4.1 has been deprecated - and will be removed in community.general 9.0.0 (https://github.com/ansible-collections/community.general/pull/5400). - - django_manage - support for the commands ``cleanup``, ``syncdb`` and ``validate`` - that have been deprecated in Django long time ago will be removed in community.general - 9.0.0 (https://github.com/ansible-collections/community.general/pull/5400). - - django_manage - the behavior of "creating the virtual environment when missing" - is being deprecated and will be removed in community.general version 9.0.0 - (https://github.com/ansible-collections/community.general/pull/5405). - - gconftool2 - deprecates ``state=get`` in favor of using the module ``gconftool2_info`` - (https://github.com/ansible-collections/community.general/pull/4778). 
- - lxc_container - the module will no longer make any effort to support Python - 2 (https://github.com/ansible-collections/community.general/pull/5304). - - newrelic_deployment - ``appname`` and ``environment`` are no longer valid - options in the v2 API. They will be removed in community.general 7.0.0 (https://github.com/ansible-collections/community.general/pull/5341). - - proxmox - deprecated the current ``unprivileged`` default value, will be changed - to ``true`` in community.general 7.0.0 (https://github.com/pull/5224). - - xfconf - deprecated parameter ``disable_facts``, as since version 4.0.0 it - only allows value ``true`` (https://github.com/ansible-collections/community.general/pull/4520). - major_changes: - - The internal structure of the collection was changed for modules and action - plugins. These no longer live in a directory hierarchy ordered by topic, but - instead are now all in a single (flat) directory. This has no impact on users - *assuming they did not use internal FQCNs*. These will still work, but result - in deprecation warnings. They were never officially supported and thus the - redirects are kept as a courtsey, and this is not labelled as a breaking change. - Note that for example the Ansible VScode plugin started recommending these - internal names. If you followed its recommendation, you will now have to change - back to the short names to avoid deprecation warnings, and potential errors - in the future as these redirects will be removed in community.general 9.0.0 - (https://github.com/ansible-collections/community.general/pull/5461). - - newrelic_deployment - removed New Relic v1 API, added support for v2 API (https://github.com/ansible-collections/community.general/pull/5341). - minor_changes: - - Added MIT license as ``LICENSES/MIT.txt`` for tests/unit/plugins/modules/packaging/language/test_gem.py - (https://github.com/ansible-collections/community.general/pull/5065). 
- - All software licenses are now in the ``LICENSES/`` directory of the collection - root (https://github.com/ansible-collections/community.general/pull/5065, - https://github.com/ansible-collections/community.general/pull/5079, https://github.com/ansible-collections/community.general/pull/5080, - https://github.com/ansible-collections/community.general/pull/5083, https://github.com/ansible-collections/community.general/pull/5087, - https://github.com/ansible-collections/community.general/pull/5095, https://github.com/ansible-collections/community.general/pull/5098, - https://github.com/ansible-collections/community.general/pull/5106). - - ModuleHelper module utils - added property ``verbosity`` to base class (https://github.com/ansible-collections/community.general/pull/5035). - - ModuleHelper module utils - improved ``ModuleHelperException``, using ``to_native()`` - for the exception message (https://github.com/ansible-collections/community.general/pull/4755). - - The collection repository conforms to the `REUSE specification <https://reuse.software/spec/>`__ - except for the changelog fragments (https://github.com/ansible-collections/community.general/pull/5138). - - ali_instance - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5240). - - ali_instance_info - minor refactor when checking for installed dependency - (https://github.com/ansible-collections/community.general/pull/5240). - - alternatives - add ``state=absent`` to be able to remove an alternative (https://github.com/ansible-collections/community.general/pull/4654). - - alternatives - add ``subcommands`` parameter (https://github.com/ansible-collections/community.general/pull/4654). - - ansible_galaxy_install - minor refactoring using latest ``ModuleHelper`` updates - (https://github.com/ansible-collections/community.general/pull/4752). 
- - apk - add ``world`` parameter for supporting a custom world file (https://github.com/ansible-collections/community.general/pull/4976). - - bitwarden lookup plugin - add option ``search`` to search for other attributes - than name (https://github.com/ansible-collections/community.general/pull/5297). - - cartesian lookup plugin - start using Ansible's configuration manager to parse - options (https://github.com/ansible-collections/community.general/pull/5440). - - cmd_runner module util - added parameters ``check_mode_skip`` and ``check_mode_return`` - to ``CmdRunner.context()``, so that the command is not executed when ``check_mode=True`` - (https://github.com/ansible-collections/community.general/pull/4736). - - cmd_runner module utils - add ``__call__`` method to invoke context (https://github.com/ansible-collections/community.general/pull/4791). - - consul - adds ``ttl`` parameter for session (https://github.com/ansible-collections/community.general/pull/4996). - - consul - minor refactoring (https://github.com/ansible-collections/community.general/pull/5367). - - consul_session - adds ``token`` parameter for session (https://github.com/ansible-collections/community.general/pull/5193). - - cpanm - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived - modules (https://github.com/ansible-collections/community.general/pull/4674). - - credstash lookup plugin - start using Ansible's configuration manager to parse - options (https://github.com/ansible-collections/community.general/pull/5440). - - dependent lookup plugin - start using Ansible's configuration manager to parse - options (https://github.com/ansible-collections/community.general/pull/5440). - - dig lookup plugin - add option ``fail_on_error`` to allow stopping execution - on lookup failures (https://github.com/ansible-collections/community.general/pull/4973). - - dig lookup plugin - start using Ansible's configuration manager to parse options. 
- All documented options can now also be passed as lookup parameters (https://github.com/ansible-collections/community.general/pull/5440). - - dnstxt lookup plugin - start using Ansible's configuration manager to parse - options (https://github.com/ansible-collections/community.general/pull/5440). - - filetree lookup plugin - start using Ansible's configuration manager to parse - options (https://github.com/ansible-collections/community.general/pull/5440). - - flattened lookup plugin - start using Ansible's configuration manager to parse - options (https://github.com/ansible-collections/community.general/pull/5440). - - gitlab module util - minor refactor when checking for installed dependency - (https://github.com/ansible-collections/community.general/pull/5259). - - gitlab_branch - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259). - - gitlab_deploy_key - minor refactor when checking for installed dependency - (https://github.com/ansible-collections/community.general/pull/5259). - - gitlab_group - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259). - - gitlab_group_members - minor refactor when checking for installed dependency - (https://github.com/ansible-collections/community.general/pull/5259). - - gitlab_group_variable - minor refactor when checking for installed dependency - (https://github.com/ansible-collections/community.general/pull/5259). - - gitlab_hook - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259). - - gitlab_hook - minor refactoring (https://github.com/ansible-collections/community.general/pull/5271). - - gitlab_project - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259). 
- - gitlab_project_members - minor refactor when checking for installed dependency - (https://github.com/ansible-collections/community.general/pull/5259). - - gitlab_project_variable - minor refactor when checking for installed dependency - (https://github.com/ansible-collections/community.general/pull/5259). - - gitlab_protected_branch - minor refactor when checking for installed dependency - (https://github.com/ansible-collections/community.general/pull/5259). - - gitlab_runner - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259). - - gitlab_user - minor refactor when checking for installed dependency (https://github.com/ansible-collections/community.general/pull/5259). - - hiera lookup plugin - start using Ansible's configuration manager to parse - options. The Hiera executable and config file can now also be passed as lookup - parameters (https://github.com/ansible-collections/community.general/pull/5440). - - homebrew, homebrew_tap - added Homebrew on Linux path to defaults (https://github.com/ansible-collections/community.general/pull/5241). - - keycloak_* modules - add ``http_agent`` parameter with default value ``Ansible`` - (https://github.com/ansible-collections/community.general/issues/5023). - - keyring lookup plugin - start using Ansible's configuration manager to parse - options (https://github.com/ansible-collections/community.general/pull/5440). - - lastpass - use config manager for handling plugin options (https://github.com/ansible-collections/community.general/pull/5022). - - linode inventory plugin - simplify option handling (https://github.com/ansible-collections/community.general/pull/5438). - - listen_ports_facts - add new ``include_non_listening`` option which adds ``-a`` - option to ``netstat`` and ``ss``. 
This shows both listening and non-listening - (for TCP this means established connections) sockets, and returns ``state`` - and ``foreign_address`` (https://github.com/ansible-collections/community.general/issues/4762, - https://github.com/ansible-collections/community.general/pull/4953). - - lmdb_kv lookup plugin - start using Ansible's configuration manager to parse - options (https://github.com/ansible-collections/community.general/pull/5440). - - lxc_container - minor refactoring (https://github.com/ansible-collections/community.general/pull/5358). - - machinectl become plugin - can now be used with a password from another user - than root, if a polkit rule is present (https://github.com/ansible-collections/community.general/pull/4849). - - machinectl become plugin - combine the success command when building the become - command to be consistent with other become plugins (https://github.com/ansible-collections/community.general/pull/5287). - - manifold lookup plugin - start using Ansible's configuration manager to parse - options (https://github.com/ansible-collections/community.general/pull/5440). - - maven_artifact - add a new ``unredirected_headers`` option that can be used - with ansible-core 2.12 and above. The default value is to not use ``Authorization`` - and ``Cookie`` headers on redirects for security reasons. With ansible-core - 2.11, all headers are still passed on for redirects (https://github.com/ansible-collections/community.general/pull/4812). - - mksysb - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived - modules (https://github.com/ansible-collections/community.general/pull/4674). - - nagios - minor refactoring on parameter validation for different actions (https://github.com/ansible-collections/community.general/pull/5239). - - netcup_dnsapi - add ``timeout`` parameter (https://github.com/ansible-collections/community.general/pull/5301). 
- - nmcli - add ``transport_mode`` configuration for Infiniband devices (https://github.com/ansible-collections/community.general/pull/5361). - - nmcli - add bond option ``xmit_hash_policy`` to bond options (https://github.com/ansible-collections/community.general/issues/5148). - - nmcli - adds ``vpn`` type and parameter for supporting VPN with service type - L2TP and PPTP (https://github.com/ansible-collections/community.general/pull/4746). - - nmcli - honor IP options for VPNs (https://github.com/ansible-collections/community.general/pull/5228). - - opentelemetry callback plugin - allow configuring opentelementry callback - via config file (https://github.com/ansible-collections/community.general/pull/4916). - - opentelemetry callback plugin - send logs. This can be disabled by setting - ``disable_logs=false`` (https://github.com/ansible-collections/community.general/pull/4175). - - pacman - added parameters ``reason`` and ``reason_for`` to set/change the - install reason of packages (https://github.com/ansible-collections/community.general/pull/4956). - - passwordstore lookup plugin - allow options to be passed lookup options instead - of being part of the term strings (https://github.com/ansible-collections/community.general/pull/5444). - - passwordstore lookup plugin - allow using alternative password managers by - detecting wrapper scripts, allow explicit configuration of pass and gopass - backends (https://github.com/ansible-collections/community.general/issues/4766). - - passwordstore lookup plugin - improve error messages to include stderr (https://github.com/ansible-collections/community.general/pull/5436) - - pipx - added state ``latest`` to the module (https://github.com/ansible-collections/community.general/pull/5105). - - pipx - changed implementation to use ``cmd_runner`` (https://github.com/ansible-collections/community.general/pull/5085). 
- - pipx - module fails faster when ``name`` is missing for states ``upgrade`` - and ``reinstall`` (https://github.com/ansible-collections/community.general/pull/5100). - - pipx - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived - modules (https://github.com/ansible-collections/community.general/pull/4674). - - pipx module utils - created new module util ``pipx`` providing a ``cmd_runner`` - specific for the ``pipx`` module (https://github.com/ansible-collections/community.general/pull/5085). - - portage - add knobs for Portage's ``--backtrack`` and ``--with-bdeps`` options - (https://github.com/ansible-collections/community.general/pull/5349). - - portage - use Portage's python module instead of calling gentoolkit-provided - program in shell (https://github.com/ansible-collections/community.general/pull/5349). - - proxmox inventory plugin - added new flag ``qemu_extended_statuses`` and new - groups ``prelaunch``, ``paused``. They will be - populated only when ``want_facts=true``, ``qemu_extended_statuses=true`` and - only for ``QEMU`` machines (https://github.com/ansible-collections/community.general/pull/4723). - - proxmox inventory plugin - simplify option handling code (https://github.com/ansible-collections/community.general/pull/5437). - - proxmox module utils, the proxmox* modules - add ``api_task_ok`` helper to - standardize API task status checks across all proxmox modules (https://github.com/ansible-collections/community.general/pull/5274). - - proxmox_kvm - allow ``agent`` argument to be a string (https://github.com/ansible-collections/community.general/pull/5107). - - proxmox_snap - add ``unbind`` param to support snapshotting containers with - configured mountpoints (https://github.com/ansible-collections/community.general/pull/5274). - - puppet - adds ``confdir`` parameter to configure a custom confdir location - (https://github.com/ansible-collections/community.general/pull/4740). 
- - redfish - added new command GetVirtualMedia, VirtualMediaInsert and VirtualMediaEject - to Systems category due to Redfish spec changes the virtualMedia resource - location from Manager to System (https://github.com/ansible-collections/community.general/pull/5124). - - redfish_config - add ``SetSessionService`` to set default session timeout - policy (https://github.com/ansible-collections/community.general/issues/5008). - - redfish_info - add ``GetManagerInventory`` to report list of Manager inventory - information (https://github.com/ansible-collections/community.general/issues/4899). - - seport - added new argument ``local`` (https://github.com/ansible-collections/community.general/pull/5203) - - snap - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived - modules (https://github.com/ansible-collections/community.general/pull/4674). - - sudoers - will attempt to validate the proposed sudoers rule using visudo - if available, optionally skipped, or required (https://github.com/ansible-collections/community.general/pull/4794, - https://github.com/ansible-collections/community.general/issues/4745). - - terraform - adds capability to handle complex variable structures for ``variables`` - parameter in the module. This must be enabled with the new ``complex_vars`` - parameter (https://github.com/ansible-collections/community.general/pull/4797). - - terraform - run ``terraform init`` with ``-no-color`` not to mess up the stdout - of the task (https://github.com/ansible-collections/community.general/pull/5147). - - wdc_redfish_command - add ``IndicatorLedOn`` and ``IndicatorLedOff`` commands - for ``Chassis`` category (https://github.com/ansible-collections/community.general/pull/5059). - - wdc_redfish_command - add ``PowerModeLow`` and ``PowerModeNormal`` commands - for ``Chassis`` category (https://github.com/ansible-collections/community.general/pull/5145). 
- - xfconf - add ``stdout``, ``stderr`` and ``cmd`` to the module results (https://github.com/ansible-collections/community.general/pull/5037). - - xfconf - changed implementation to use ``cmd_runner`` (https://github.com/ansible-collections/community.general/pull/4776). - - xfconf - use ``do_raise()`` instead of defining custom exception class (https://github.com/ansible-collections/community.general/pull/4975). - - xfconf - using ``do_raise()`` to raise exceptions in ``ModuleHelper`` derived - modules (https://github.com/ansible-collections/community.general/pull/4674). - - xfconf module utils - created new module util ``xfconf`` providing a ``cmd_runner`` - specific for ``xfconf`` modules (https://github.com/ansible-collections/community.general/pull/4776). - - xfconf_info - changed implementation to use ``cmd_runner`` (https://github.com/ansible-collections/community.general/pull/4776). - - xfconf_info - use ``do_raise()`` instead of defining custom exception class - (https://github.com/ansible-collections/community.general/pull/4975). - - znode - possibility to use ZooKeeper ACL authentication (https://github.com/ansible-collections/community.general/pull/5306). - release_summary: This is a pre-release for the upcoming 6.0.0 major release. - The main objective of this pre-release is to make it possible to test the - large structural changes by flattening the directory structure. See the corresponding - entry in the changelog for details. - removed_features: - - bitbucket* modules - ``username`` is no longer an alias of ``workspace``, - but of ``user`` (https://github.com/ansible-collections/community.general/pull/5326). - - gem - the default of the ``norc`` option changed from ``false`` to ``true`` - (https://github.com/ansible-collections/community.general/pull/5326). - - gitlab_group_members - ``gitlab_group`` must now always contain the full path, - and no longer just the name or path (https://github.com/ansible-collections/community.general/pull/5326). 
- - keycloak_authentication - the return value ``flow`` has been removed. Use - ``end_state`` instead (https://github.com/ansible-collections/community.general/pull/5326). - - keycloak_group - the return value ``group`` has been removed. Use ``end_state`` - instead (https://github.com/ansible-collections/community.general/pull/5326). - - lxd_container - the default of the ``ignore_volatile_options`` option changed - from ``true`` to ``false`` (https://github.com/ansible-collections/community.general/pull/5326). - - mail callback plugin - the ``sender`` option is now required (https://github.com/ansible-collections/community.general/pull/5326). - - module_helper module utils - remove the ``VarDict`` attribute from ``ModuleHelper``. - Import ``VarDict`` from ``ansible_collections.community.general.plugins.module_utils.mh.mixins.vars`` - instead (https://github.com/ansible-collections/community.general/pull/5326). - - proxmox inventory plugin - the default of the ``want_proxmox_nodes_ansible_host`` - option changed from ``true`` to ``false`` (https://github.com/ansible-collections/community.general/pull/5326). - - vmadm - the ``debug`` option has been removed. It was not used anyway (https://github.com/ansible-collections/community.general/pull/5326). 
- fragments: - - 3671-illumos-pfexec.yml - - 4175-opentelemetry_logs.yml - - 4520-xfconf-deprecate-disable-facts.yml - - 4654-alternatives-add-subcommands.yml - - 4674-use-mh-raise.yaml - - 4682-compatibility-virtualmedia-resource-location.yaml - - 4700-code-changes.yml - - 4712-consul-bugfix.yaml - - 4719-fix-keycloak-realm.yaml - - 4724-proxmox-qemu-extend.yaml - - 4726-zfs.yml - - 4733-redis-fail.yml - - 4736-cmd-runner-skip-if-check.yml - - 4740-puppet-feature.yaml - - 4746-add-vpn-support-nmcli.yaml - - 4752-ansible-galaxy-install-mh-updates.yml - - 4755-mhexception-improvement.yml - - 4776-xfconf-cmd-runner.yaml - - 4777-cmd-runner-deprecate-fmt.yaml - - 4778-gconftool2-deprecate-state-get.yaml - - 4780-passwordstore-wrapper-compat.yml - - 4791-cmd-runner-callable.yaml - - 4794-sudoers-validation.yml - - 4797-terraform-complex-variables.yml - - 4809-redhat_subscription-unsubscribe.yaml - - 4810-alternatives-bug.yml - - 4812-expose-unredirected-headers.yml - - 4813-fix-nmcli-convert-list.yaml - - 4814-sudoers-file-permissions.yml - - 4816-proxmox-fix-extended-status.yaml - - 4836-alternatives.yml - - 4839-fix-VirtualMediaInsert-Supermicro.yml - - 4849-add-password-prompt-support-for-machinectl.yml - - 4852-sudoers-state-absent.yml - - 4886-fix-lxd-inventory-hostname.yml - - 4899-add-GetManagerInventory-for-redfish_info.yml - - 4901-fix-redfish-chassispower.yml - - 4903-cmdrunner-bugfix.yaml - - 4910-fix-for-agent-enabled.yml - - 4911-dsv-honor-tld-option.yml - - 4916-opentelemetry-ini-options.yaml - - 4933-fix-rax-clb-nodes.yaml - - 4945-fix-get_vm-int-parse-handling.yaml - - 4953-listen-ports-facts-extend-output.yaml - - 4955-fix-path-detection-for-gopass.yaml - - 4956-pacman-install-reason.yaml - - 4959-pacman-fix-url-packages-name.yaml - - 4964-fix-keyring-info.yml - - 4973-introduce-dig-lookup-argument.yaml - - 4975-xfconf-use-do-raise.yaml - - 4976-apk-add-support-for-a-custom-world-file.yaml - - 4996-consul-session-ttl.yml - - 
4998-nmcli-fix-int-options-idempotence.yml - - 4999-xfconf-bool.yml - - 5008-addSetSessionService.yml - - 5019-slack-support-more-groups.yml - - 5022-lastpass-lookup-cleanup.yml - - 5023-http-agent-param-keycloak.yml - - 5027-fix-returnall-for-gopass.yaml - - 5035-mh-base-verbosity.yaml - - 5037-xfconf-add-cmd-output.yaml - - 5059-wdc_redfish_command-indicator-leds.yml - - 5085-pipx-use-cmd-runner.yaml - - 5100-pipx-req-if.yaml - - 5105-pipx-state-latest.yaml - - 5107-proxmox-agent-argument.yaml - - 5108-proxmox-node-name-condition.yml - - 5111-fixes.yml - - 5112-fix-nsupdate-ns-entry.yaml - - 5124-compatibility-virtualmedia-resource-location.yaml - - 5126-nmcli-remove-diffs.yml - - 5129-dig-boolean-params-fix.yml - - 5145-wdc-redfish-enclosure-power-state.yml - - 5147-terraform-init-no-color.yml - - 5149-nmcli-bond-option.yml - - 5151-add-delinea-support-tss-lookup.yml - - 5193-consul-session-token.yaml - - 5194-fix-proxmox-agent-exception.yaml - - 5198-proxmox.yml - - 5202-bugfix-environmentError-wrong-indentation.yaml - - 5203-seport-add-local-argument.yaml - - 5206-proxmox-conditional-vmid.yml - - 5210-redfish_utils-cleanup-of-configuration-logic-and-oem-checks.yaml - - 5224-proxmox-unprivileged-default.yaml - - 5228-nmcli-ip-options.yaml - - 5239-nagios-refactor.yaml - - 5240-unused-imports.yaml - - 5241-homebrew-add-linux-path.yaml - - 5243-osx-defaults-expand-user-flags.yml - - 5249-add-new-channel-prefix.yml - - 5259-gitlab-imports.yaml - - 5271-gitlab_hook-refactor.yaml - - 5274-proxmox-snap-container-with-mountpoints.yml - - 5280-lxc_container-py3.yaml - - 5282-locale_gen.yaml - - 5287-machinectl-become-success.yml - - 5291-fix-nmcli-error-when-setting-unset-mac-address.yaml - - 5297-bitwarden-add-search-field.yml - - 5301-netcup_dnsapi-timeout.yml - - 5306-add-options-for-authentication.yml - - 5307-ini_file-lint.yaml - - 5313-fix-redhat_subscription-idempotency-pool_ids.yml - - 5341-newrelic-v2-api-changes.yml - - 
5342-opentelemetry_bug_fix_opentelemetry-api-1.13.yml - - 5348-fix-vbox-deeply-nested-hostvars.yml - - 5349-drop-gentoolkit-more-knobs.yml - - 5358-lxc-container-refactor.yml - - 5361-nmcli-add-infiniband-transport-mode.yaml - - 5367-consul-refactor.yaml - - 5369-pkgng-fix-update-all.yaml - - 5370-mh-cmdmixin-deprecation.yaml - - 5377-nsupdate-ns-records-with-bind.yml - - 5383-xenserver_facts.yml - - 5385-search_s-based-_is_value_present.yaml - - 5393-archive.yml - - 5400-django-manage-deprecations.yml - - 5404-django-manage-venv-deprecation.yml - - 5436-passwordstore-errors.yml - - 5437-proxmox.yml - - 5438-linode.yml - - 5439-dig-return-empty-result.yml - - 5444-passwordstore-options.yml - - 5457-dnstxt-empty.yml - - 6.0.0-a1.yml - - deprecation-removals.yml - - licenses-2.yml - - licenses.yml - - lookup-options.yml - - psf-license.yml - - simplified-bsd-license.yml - - unflatmap.yml - modules: - - description: Retrieve GConf configurations - name: gconftool2_info - namespace: '' - - description: Add/remove/change files in ISO file - name: iso_customize - namespace: '' - - description: Allows administration of Keycloak user_rolemapping with the Keycloak - API - name: keycloak_user_rolemapping - namespace: '' - - description: Set or delete a passphrase using the Operating System's native - keyring - name: keyring - namespace: '' - - description: Get a passphrase using the Operating System's native keyring - name: keyring_info - namespace: '' - - description: Listing of resource policy_profiles in ManageIQ - name: manageiq_policies_info - namespace: '' - - description: Retrieve resource tags in ManageIQ - name: manageiq_tags_info - namespace: '' - - description: Retrieves information about applications installed with pipx - name: pipx_info - namespace: '' - - description: Management of a disk of a Qemu(KVM) VM in a Proxmox VE cluster. 
- name: proxmox_disk - namespace: '' - - description: Scaleway compute - private network management - name: scaleway_compute_private_network - namespace: '' - - description: Scaleway Container registry management module - name: scaleway_container_registry - namespace: '' - - description: Scaleway Container registry info module - name: scaleway_container_registry_info - namespace: '' - - description: Scaleway Function namespace management - name: scaleway_function_namespace - namespace: '' - - description: Retrieve information on Scaleway Function namespace - name: scaleway_function_namespace_info - namespace: '' - - description: Manages WDC UltraStar Data102 Out-Of-Band controllers using Redfish - APIs - name: wdc_redfish_command - namespace: '' - - description: Manages WDC UltraStar Data102 Out-Of-Band controllers using Redfish - APIs - name: wdc_redfish_info - namespace: '' - plugins: - filter: - - description: Counts hashable elements in a sequence - name: counter - namespace: null - lookup: - - description: Retrieve secrets from Bitwarden - name: bitwarden - namespace: null - release_date: '2022-11-02' - 6.0.1: - changes: - bugfixes: - - dependent lookup plugin - avoid warning on deprecated parameter for ``Templar.template()`` - (https://github.com/ansible-collections/community.general/pull/5543). - - jenkins_build - fix the logical flaw when deleting a Jenkins build (https://github.com/ansible-collections/community.general/pull/5514). - - one_vm - avoid splitting labels that are ``None`` (https://github.com/ansible-collections/community.general/pull/5489). - - onepassword_raw - add missing parameter to plugin documentation (https://github.com/ansible-collections/community.general/issues/5506). - - proxmox_disk - avoid duplicate ``vmid`` reference (https://github.com/ansible-collections/community.general/issues/5492, - https://github.com/ansible-collections/community.general/pull/5493). - release_summary: Bugfix release for Ansible 7.0.0. 
- fragments: - - 5489-nonetype-in-get-vm-by-label.yml - - 5493-proxmox.yml - - 5506-onepassword_raw-missing-param.yml - - 5514-fix-logical-flaw-when-deleting-jenkins-build.yml - - 5543-dependent-template.yml - - 6.0.1.yml - release_date: '2022-11-15' - 6.1.0: - changes: - bugfixes: - - chroot connection plugin - add ``inventory_hostname`` to vars under ``remote_addr``. - This is needed for compatibility with ansible-core 2.13 (https://github.com/ansible-collections/community.general/pull/5570). - - cmd_runner module utils - fixed bug when handling default cases in ``cmd_runner_fmt.as_map()`` - (https://github.com/ansible-collections/community.general/pull/5538). - - cmd_runner module utils - formatting arguments ``cmd_runner_fmt.as_fixed()`` - was expecting an non-existing argument (https://github.com/ansible-collections/community.general/pull/5538). - - keycloak_client_rolemapping - calculate ``proposed`` and ``after`` return - values properly (https://github.com/ansible-collections/community.general/pull/5619). - - keycloak_client_rolemapping - remove only listed mappings with ``state=absent`` - (https://github.com/ansible-collections/community.general/pull/5619). - - proxmox inventory plugin - fix bug while templating when using templates for - the ``url``, ``user``, ``password``, ``token_id``, or ``token_secret`` options - (https://github.com/ansible-collections/community.general/pull/5640). - - proxmox inventory plugin - handle tags delimited by semicolon instead of comma, - which happens from Proxmox 7.3 on (https://github.com/ansible-collections/community.general/pull/5602). - - redhat_subscription - do not ignore ``consumer_name`` and other variables - if ``activationkey`` is specified (https://github.com/ansible-collections/community.general/issues/3486, - https://github.com/ansible-collections/community.general/pull/5627). 
- - redhat_subscription - do not pass arguments to ``subscription-manager register`` - for things already configured; now a specified ``rhsm_baseurl`` is properly - set for subscription-manager (https://github.com/ansible-collections/community.general/pull/5583). - - unixy callback plugin - fix plugin to work with ansible-core 2.14 by using - Ansible's configuration manager for handling options (https://github.com/ansible-collections/community.general/issues/5600). - - vdo - now uses ``yaml.safe_load()`` to parse command output instead of the - deprecated ``yaml.load()`` which is potentially unsafe. Using ``yaml.load()`` - without explicitely setting a ``Loader=`` is also an error in pyYAML 6.0 (https://github.com/ansible-collections/community.general/pull/5632). - - vmadm - fix for index out of range error in ``get_vm_uuid`` (https://github.com/ansible-collections/community.general/pull/5628). - deprecated_features: - - The ``sap`` modules ``sapcar_extract``, ``sap_task_list_execute``, and ``hana_query``, - will be removed from this collection in community.general 7.0.0 and replaced - with redirects to ``community.sap_libs``. If you want to continue using these - modules, make sure to also install ``community.sap_libs`` (it is part of the - Ansible package) (https://github.com/ansible-collections/community.general/pull/5614). - minor_changes: - - cmd_runner module utils - ``cmd_runner_fmt.as_bool()`` can now take an extra - parameter to format when value is false (https://github.com/ansible-collections/community.general/pull/5647). - - gconftool2 - refactor using ``ModuleHelper`` and ``CmdRunner`` (https://github.com/ansible-collections/community.general/pull/5545). - - java_certs - add more detailed error output when extracting certificate from - PKCS12 fails (https://github.com/ansible-collections/community.general/pull/5550). 
- - jenkins_plugin - refactor code to module util to fix sanity check (https://github.com/ansible-collections/community.general/pull/5565). - - lxd_project - refactored code out to module utils to clear sanity check (https://github.com/ansible-collections/community.general/pull/5549). - - nmap inventory plugin - add new options ``udp_scan``, ``icmp_timestamp``, - and ``dns_resolve`` for different types of scans (https://github.com/ansible-collections/community.general/pull/5566). - - rax_scaling_group - refactored out code to the ``rax`` module utils to clear - the sanity check (https://github.com/ansible-collections/community.general/pull/5563). - - redfish_command - add ``PerformRequestedOperations`` command to perform any - operations necessary to continue the update flow (https://github.com/ansible-collections/community.general/issues/4276). - - redfish_command - add ``update_apply_time`` to ``SimpleUpdate`` command (https://github.com/ansible-collections/community.general/issues/3910). - - redfish_command - add ``update_status`` to output of ``SimpleUpdate`` command - to allow a user monitor the update in progress (https://github.com/ansible-collections/community.general/issues/4276). - - redfish_info - add ``GetUpdateStatus`` command to check the progress of a - previous update request (https://github.com/ansible-collections/community.general/issues/4276). - - redfish_utils module utils - added PUT (``put_request()``) functionality (https://github.com/ansible-collections/community.general/pull/5490). - - slack - add option ``prepend_hash`` which allows to control whether a ``#`` - is prepended to ``channel_id``. The current behavior (value ``auto``) is to - prepend ``#`` unless some specific prefixes are found. That list of prefixes - is incomplete, and there does not seem to exist a documented condition on - when exactly ``#`` must not be prepended. 
We recommend to explicitly set ``prepend_hash=always`` - or ``prepend_hash=never`` to avoid any ambiguity (https://github.com/ansible-collections/community.general/pull/5629). - - spotinst_aws_elastigroup - add ``elements`` attribute when missing in ``list`` - parameters (https://github.com/ansible-collections/community.general/pull/5553). - - ssh_config - add ``host_key_algorithms`` option (https://github.com/ansible-collections/community.general/pull/5605). - - udm_share - added ``elements`` attribute to ``list`` type parameters (https://github.com/ansible-collections/community.general/pull/5557). - - udm_user - add ``elements`` attribute when missing in ``list`` parameters - (https://github.com/ansible-collections/community.general/pull/5559). - release_summary: Regular bugfix and feature release. - fragments: - - 3910-redfish-add-operation-apply-time-to-simple-update.yml - - 4276-redfish-command-updates-for-full-simple-update-workflow.yml - - 5490-adding-put-functionality.yml - - 5538-cmd-runner-as-fixed.yml - - 5545-gconftool-cmd-runner.yml - - 5549-lxd-project-sanity.yml - - 5550-java_certs-not-enough-info-on-error.yml - - 5553-spotinst-aws-elasticgroup-sanity.yml - - 5557-udm-share-sanity.yml - - 5559-udm-user-sanity.yml - - 5563-rax-scaling-group-sanity.yml - - 5565-jenkins-plugin-sanity.yml - - 5566-additional-flags-nmap.yml - - 5570-chroot-plugin-fix-default-inventory_hostname.yml - - 5583-redhat_subscription-subscribe-parameters.yaml - - 5601-unixy-callback-use-config-manager.yml - - 5602-proxmox-tags.yml - - 5605-ssh-config-add-host-key-algorithms.yaml - - 5619-keycloak-improvements.yml - - 5627-redhat_subscription-subscribe-parameters-2.yaml - - 5628-fix-vmadm-off-by-one.yml - - 5629-add-prepend-hash-option-for-channel-id.yml - - 5632-vdo-Use-yaml-safe-load-instead-of-yaml-load.yml - - 5640-fix-typo-proxmox-inventory.yml - - 5647-cmd-runner-as-bool-false.yml - - 6.1.0.yml - - sap-removal.yml - modules: - - description: Manage project badges on GitLab 
Server - name: gitlab_project_badge - namespace: '' - - description: Retrieve client secret via Keycloak API - name: keycloak_clientsecret_info - namespace: '' - - description: Regenerate Keycloak client secret via Keycloak API - name: keycloak_clientsecret_regenerate - namespace: '' - release_date: '2022-12-06' - 6.2.0: - changes: - bugfixes: - - ansible_galaxy_install - set default to raise exception if command's return - code is different from zero (https://github.com/ansible-collections/community.general/pull/5680). - - ansible_galaxy_install - try ``C.UTF-8`` and then fall back to ``en_US.UTF-8`` - before failing (https://github.com/ansible-collections/community.general/pull/5680). - - gitlab_group_variables - fix dropping variables accidentally when GitLab introduced - new properties (https://github.com/ansible-collections/community.general/pull/5667). - - gitlab_project_variables - fix dropping variables accidentally when GitLab - introduced new properties (https://github.com/ansible-collections/community.general/pull/5667). - - lxc_container - fix the arguments of the lxc command which broke the creation - and cloning of containers (https://github.com/ansible-collections/community.general/issues/5578). - - opkg - fix issue that ``force=reinstall`` would not reinstall an existing - package (https://github.com/ansible-collections/community.general/pull/5705). - - proxmox_disk - fixed possible issues with redundant ``vmid`` parameter (https://github.com/ansible-collections/community.general/issues/5492, - https://github.com/ansible-collections/community.general/pull/5672). - - proxmox_nic - fixed possible issues with redundant ``vmid`` parameter (https://github.com/ansible-collections/community.general/issues/5492, - https://github.com/ansible-collections/community.general/pull/5672). 
- - unixy callback plugin - fix typo introduced when updating to use Ansible's - configuration manager for handling options (https://github.com/ansible-collections/community.general/issues/5600). - deprecated_features: - - manageiq_policies - deprecate ``state=list`` in favour of using ``community.general.manageiq_policies_info`` - (https://github.com/ansible-collections/community.general/pull/5721). - - rax - module relies on deprecates library ``pyrax``. Unless maintainers step - up to work on the module, it will be marked as deprecated in community.general - 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). - - rax_cbs - module relies on deprecates library ``pyrax``. Unless maintainers - step up to work on the module, it will be marked as deprecated in community.general - 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). - - rax_cbs_attachments - module relies on deprecates library ``pyrax``. Unless - maintainers step up to work on the module, it will be marked as deprecated - in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). - - rax_cdb - module relies on deprecates library ``pyrax``. Unless maintainers - step up to work on the module, it will be marked as deprecated in community.general - 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). - - rax_cdb_database - module relies on deprecates library ``pyrax``. Unless maintainers - step up to work on the module, it will be marked as deprecated in community.general - 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). - - rax_cdb_user - module relies on deprecates library ``pyrax``. 
Unless maintainers - step up to work on the module, it will be marked as deprecated in community.general - 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). - - rax_clb - module relies on deprecates library ``pyrax``. Unless maintainers - step up to work on the module, it will be marked as deprecated in community.general - 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). - - rax_clb_nodes - module relies on deprecates library ``pyrax``. Unless maintainers - step up to work on the module, it will be marked as deprecated in community.general - 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). - - rax_clb_ssl - module relies on deprecates library ``pyrax``. Unless maintainers - step up to work on the module, it will be marked as deprecated in community.general - 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). - - rax_dns - module relies on deprecates library ``pyrax``. Unless maintainers - step up to work on the module, it will be marked as deprecated in community.general - 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). - - rax_dns_record - module relies on deprecates library ``pyrax``. Unless maintainers - step up to work on the module, it will be marked as deprecated in community.general - 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). - - rax_facts - module relies on deprecates library ``pyrax``. Unless maintainers - step up to work on the module, it will be marked as deprecated in community.general - 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). - - rax_files - module relies on deprecates library ``pyrax``. 
Unless maintainers - step up to work on the module, it will be marked as deprecated in community.general - 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). - - rax_files_objects - module relies on deprecates library ``pyrax``. Unless - maintainers step up to work on the module, it will be marked as deprecated - in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). - - rax_identity - module relies on deprecates library ``pyrax``. Unless maintainers - step up to work on the module, it will be marked as deprecated in community.general - 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). - - rax_keypair - module relies on deprecates library ``pyrax``. Unless maintainers - step up to work on the module, it will be marked as deprecated in community.general - 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). - - rax_meta - module relies on deprecates library ``pyrax``. Unless maintainers - step up to work on the module, it will be marked as deprecated in community.general - 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). - - rax_mon_alarm - module relies on deprecates library ``pyrax``. Unless maintainers - step up to work on the module, it will be marked as deprecated in community.general - 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). - - rax_mon_check - module relies on deprecates library ``pyrax``. Unless maintainers - step up to work on the module, it will be marked as deprecated in community.general - 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). - - rax_mon_entity - module relies on deprecates library ``pyrax``. 
Unless maintainers - step up to work on the module, it will be marked as deprecated in community.general - 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). - - rax_mon_notification - module relies on deprecates library ``pyrax``. Unless - maintainers step up to work on the module, it will be marked as deprecated - in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). - - rax_mon_notification_plan - module relies on deprecates library ``pyrax``. - Unless maintainers step up to work on the module, it will be marked as deprecated - in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). - - rax_network - module relies on deprecates library ``pyrax``. Unless maintainers - step up to work on the module, it will be marked as deprecated in community.general - 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). - - rax_queue - module relies on deprecates library ``pyrax``. Unless maintainers - step up to work on the module, it will be marked as deprecated in community.general - 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). - - rax_scaling_group - module relies on deprecates library ``pyrax``. Unless - maintainers step up to work on the module, it will be marked as deprecated - in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). - - rax_scaling_policy - module relies on deprecates library ``pyrax``. Unless - maintainers step up to work on the module, it will be marked as deprecated - in community.general 7.0.0 and removed in version 9.0.0 (https://github.com/ansible-collections/community.general/pull/5733). 
- minor_changes: - - opkg - allow installing a package in a certain version (https://github.com/ansible-collections/community.general/pull/5688). - - proxmox - added new module parameter ``tags`` for use with PVE 7+ (https://github.com/ansible-collections/community.general/pull/5714). - - puppet - refactored module to use ``CmdRunner`` for executing ``puppet`` (https://github.com/ansible-collections/community.general/pull/5612). - - redhat_subscription - add a ``server_proxy_scheme`` parameter to configure - the scheme for the proxy server (https://github.com/ansible-collections/community.general/pull/5662). - - ssh_config - refactor code to module util to fix sanity check (https://github.com/ansible-collections/community.general/pull/5720). - - sudoers - adds ``host`` parameter for setting hostname restrictions in sudoers - rules (https://github.com/ansible-collections/community.general/issues/5702). - release_summary: Regular bugfix and feature release. - fragments: - - 5612-puppet-cmd-runner.yml - - 5659-fix-lxc_container-command.yml - - 5662-redhat_subscription-server_proxy_scheme.yaml - - 5666-gitlab-variables.yml - - 5672-proxmox.yml - - 5680-ansible_galaxy_install-fx-locale.yaml - - 5688-opkg-module-install-certain-version.yml - - 5703-sudoers-host-support.yml - - 5705-opkg-fix-force-reinstall.yml - - 5714-proxmox-lxc-tag-support.yml - - 5720-ssh_config-plugin-sanity.yml - - 5721-manageiq-policies-deprecate-list-state.yaml - - 5733-rax-deprecation-notice.yml - - 5744-unixy-callback-fix-config-manager-typo.yml - - 6.2.0.yml - release_date: '2023-01-04' - 6.3.0: - changes: - breaking_changes: - - 'ModuleHelper module utils - when the module sets output variables named ``msg``, - ``exception``, ``output``, ``vars``, or ``changed``, the actual output will - prefix those names with ``_`` (underscore symbol) only when they clash with - output variables generated by ModuleHelper itself, which only occurs when - handling exceptions. 
Please note that this breaking change does not require - a new major release since before this release, it was not possible to add - such variables to the output `due to a bug `__ - (https://github.com/ansible-collections/community.general/pull/5765). - - ' - bugfixes: - - ModuleHelper - fix bug when adjusting the name of reserved output variables - (https://github.com/ansible-collections/community.general/pull/5755). - - alternatives - support subcommands on Fedora 37, which uses ``follower`` instead - of ``slave`` (https://github.com/ansible-collections/community.general/pull/5794). - - bitwarden lookup plugin - clarify what to do, if the bitwarden vault is not - unlocked (https://github.com/ansible-collections/community.general/pull/5811). - - dig lookup plugin - correctly handle DNSKEY record type's ``algorithm`` field - (https://github.com/ansible-collections/community.general/pull/5914). - - gem - fix force parameter not being passed to gem command when uninstalling - (https://github.com/ansible-collections/community.general/pull/5822). - - gem - fix hang due to interactive prompt for confirmation on specific version - uninstall (https://github.com/ansible-collections/community.general/pull/5751). - - gitlab_deploy_key - also update ``title`` and not just ``can_push`` (https://github.com/ansible-collections/community.general/pull/5888). - - keycloak_user_federation - fixes federation creation issue. When a new federation - was created and at the same time a default / standard mapper was also changed - / updated the creation process failed as a bad None set variable led to a - bad malformed url request (https://github.com/ansible-collections/community.general/pull/5750). - - 'keycloak_user_federation - fixes idempotency detection issues. In some cases - the module could fail to properly detect already existing user federations - because of a buggy seemingly superflous extra query parameter (https://github.com/ansible-collections/community.general/pull/5732). 
- - ' - - loganalytics callback plugin - adjust type of callback to ``notification``, - it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761). - - logdna callback plugin - adjust type of callback to ``notification``, it was - incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761). - - logstash callback plugin - adjust type of callback to ``notification``, it - was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761). - - nsupdate - fix zone lookup. The SOA record for an existing zone is returned - as an answer RR and not as an authority RR (https://github.com/ansible-collections/community.general/issues/5817, - https://github.com/ansible-collections/community.general/pull/5818). - - proxmox_disk - fixed issue with read timeout on import action (https://github.com/ansible-collections/community.general/pull/5803). - - redfish_utils - removed basic auth HTTP header when performing a GET on the - service root resource and when performing a POST to the session collection - (https://github.com/ansible-collections/community.general/issues/5886). - - splunk callback plugin - adjust type of callback to ``notification``, it was - incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761). - - sumologic callback plugin - adjust type of callback to ``notification``, it - was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761). - - syslog_json callback plugin - adjust type of callback to ``notification``, - it was incorrectly classified as ``aggregate`` before (https://github.com/ansible-collections/community.general/pull/5761). 
- - terraform - fix ``current`` workspace never getting appended to the ``all`` - key in the ``workspace_ctf`` object (https://github.com/ansible-collections/community.general/pull/5735). - - terraform - fix ``terraform init`` failure when there are multiple workspaces - on the remote backend and when ``default`` workspace is missing by setting - ``TF_WORKSPACE`` environmental variable to the value of ``workspace`` when - used (https://github.com/ansible-collections/community.general/pull/5735). - - terraform module - disable ANSI escape sequences during validation phase (https://github.com/ansible-collections/community.general/pull/5843). - - xml - fixed a bug where empty ``children`` list would not be set (https://github.com/ansible-collections/community.general/pull/5808). - deprecated_features: - - consul - deprecate using parameters unused for ``state=absent`` (https://github.com/ansible-collections/community.general/pull/5772). - - gitlab_runner - the default of the new option ``access_level_on_creation`` - will change from ``false`` to ``true`` in community.general 7.0.0. This will - cause ``access_level`` to be used during runner registration as well, and - not only during updates (https://github.com/ansible-collections/community.general/pull/5908). - minor_changes: - - apache2_module - add module argument ``warn_mpm_absent`` to control whether - warning are raised in some edge cases (https://github.com/ansible-collections/community.general/pull/5793). - - bitwarden lookup plugin - can now retrieve secrets from custom fields (https://github.com/ansible-collections/community.general/pull/5694). - - bitwarden lookup plugin - implement filtering results by ``collection_id`` - parameter (https://github.com/ansible-collections/community.general/issues/5849). - - dig lookup plugin - support CAA record type (https://github.com/ansible-collections/community.general/pull/5913). 
- - gitlab_project - add ``builds_access_level``, ``container_registry_access_level`` - and ``forking_access_level`` options (https://github.com/ansible-collections/community.general/pull/5706). - - gitlab_runner - add new boolean option ``access_level_on_creation``. It controls, - whether the value of ``access_level`` is used for runner registration or not. - The option ``access_level`` has been ignored on registration so far and was - only used on updates (https://github.com/ansible-collections/community.general/issues/5907, - https://github.com/ansible-collections/community.general/pull/5908). - - ilo_redfish_utils module utils - change implementation of DNS Server IP and - NTP Server IP update (https://github.com/ansible-collections/community.general/pull/5804). - - ipa_group - allow to add and remove external users with the ``external_user`` - option (https://github.com/ansible-collections/community.general/pull/5897). - - iptables_state - minor refactoring within the module (https://github.com/ansible-collections/community.general/pull/5844). - - one_vm - add a new ``updateconf`` option which implements the ``one.vm.updateconf`` - API call (https://github.com/ansible-collections/community.general/pull/5812). - - opkg - refactored module to use ``CmdRunner`` for executing ``opkg`` (https://github.com/ansible-collections/community.general/pull/5718). - - redhat_subscription - adds ``token`` parameter for subscription-manager authentication - using Red Hat API token (https://github.com/ansible-collections/community.general/pull/5725). - - snap - minor refactor when executing module (https://github.com/ansible-collections/community.general/pull/5773). - - snap_alias - refactored module to use ``CmdRunner`` to execute ``snap`` (https://github.com/ansible-collections/community.general/pull/5486). - - sudoers - add ``setenv`` parameters to support passing environment variables - via sudo. 
(https://github.com/ansible-collections/community.general/pull/5883) - release_summary: Regular bugfix and feature release. - fragments: - - 5486-snap-alias-cmd-runner.yml - - 5694-add-custom-fields-to-bitwarden.yml - - 5706-add-builds-forks-container-registry.yml - - 5718-opkg-refactor.yaml - - 5725-redhat_subscription-add-red-hat-api-token.yml - - 5732-bugfix-keycloak-userfed-idempotency.yml - - 5735-terraform-init-fix-when-default-workspace-doesnt-exists.yaml - - 5750-bugfixing-keycloak-usrfed-fail-when-update-default-mapper-simultaneously.yml - - 5751-gem-fix-uninstall-hang.yml - - 5755-mh-fix-output-conflict.yml - - 5761-callback-types.yml - - 5765-mh-lax-output-conflict.yml - - 5772-consul-deprecate-params-when-absent.yml - - 5773-snap-mh-execute.yml - - 5793-apache2-module-npm-warnings.yml - - 5794-alternatives-fedora37.yml - - 5803-proxmox-read-timeout.yml - - 5804-minor-changes-to-hpe-ilo-collection.yml - - 5808-xml-children-parameter-does-not-exist.yml - - 5811-clarify-bitwarden-error.yml - - 5812-implement-updateconf-api-call.yml - - 5818-nsupdate-fix-zone-lookup.yml - - 5822-gem-uninstall-force.yml - - 5843-terraform-validate-no-color.yml - - 5844-iptables-state-refactor.yml - - 5851-lookup-bitwarden-add-filter-by-collection-id-parameter.yml - - 5883-sudoers-add-support-for-setenv-parameter.yml - - 5886-redfish-correct-basic-auth-usage-on-session-creation.yml - - 5888-update-key-title.yml - - 5897-ipa_group-add-external-users.yml - - 5907-fix-gitlab_runner-not-idempotent.yml - - 5913-dig-caa.yml - - 5914-dig-dnskey.yml - - 6.3.0.yml - modules: - - description: Manages Out-Of-Band controllers using Open Composable API (OCAPI) - name: ocapi_command - namespace: '' - - description: Manages Out-Of-Band controllers using Open Composable API (OCAPI) - name: ocapi_info - namespace: '' - release_date: '2023-01-31' +--- +ancestor: 11.0.0 +releases: {} diff --git a/changelogs/config.yaml b/changelogs/config.yaml index 52e101e11f..578b8c3765 100644 --- 
a/changelogs/config.yaml +++ b/changelogs/config.yaml @@ -7,28 +7,37 @@ changelog_filename_template: ../CHANGELOG.rst changelog_filename_version_depth: 0 changes_file: changelog.yaml changes_format: combined +ignore_other_fragment_extensions: true keep_fragments: false mention_ancestor: true -flatmap: true new_plugins_after_name: removed_features notesdir: fragments +output_formats: + - md + - rst prelude_section_name: release_summary prelude_section_title: Release Summary sections: -- - major_changes - - Major Changes -- - minor_changes - - Minor Changes -- - breaking_changes - - Breaking Changes / Porting Guide -- - deprecated_features - - Deprecated Features -- - removed_features - - Removed Features (previously deprecated) -- - security_fixes - - Security Fixes -- - bugfixes - - Bugfixes -- - known_issues - - Known Issues + - - major_changes + - Major Changes + - - minor_changes + - Minor Changes + - - breaking_changes + - Breaking Changes / Porting Guide + - - deprecated_features + - Deprecated Features + - - removed_features + - Removed Features (previously deprecated) + - - security_fixes + - Security Fixes + - - bugfixes + - Bugfixes + - - known_issues + - Known Issues title: Community General +trivial_section_name: trivial +use_fqcn: true +add_plugin_period: true +changelog_nice_yaml: true +changelog_sort: version +vcs: auto diff --git a/changelogs/fragments/10227-pacemaker-cluster-and-resource-enhancement.yml b/changelogs/fragments/10227-pacemaker-cluster-and-resource-enhancement.yml new file mode 100644 index 0000000000..d1cfee7816 --- /dev/null +++ b/changelogs/fragments/10227-pacemaker-cluster-and-resource-enhancement.yml @@ -0,0 +1,7 @@ +deprecated_features: + - pacemaker_cluster - the parameter ``state`` will become a required parameter in community.general 12.0.0 (https://github.com/ansible-collections/community.general/pull/10227). 
+ +minor_changes: + - pacemaker_cluster - add ``state=maintenance`` for managing pacemaker maintenance mode (https://github.com/ansible-collections/community.general/issues/10200, https://github.com/ansible-collections/community.general/pull/10227). + - pacemaker_cluster - rename ``node`` to ``name`` and add ``node`` alias (https://github.com/ansible-collections/community.general/pull/10227). + - pacemaker_resource - enhance module by removing duplicative code (https://github.com/ansible-collections/community.general/pull/10227). diff --git a/changelogs/fragments/10231-keycloak-add-client-credentials-authentication.yml b/changelogs/fragments/10231-keycloak-add-client-credentials-authentication.yml new file mode 100644 index 0000000000..eec12e8669 --- /dev/null +++ b/changelogs/fragments/10231-keycloak-add-client-credentials-authentication.yml @@ -0,0 +1,2 @@ +minor_changes: + - keycloak - add support for ``grant_type=client_credentials`` to all keycloak modules, so that specifying ``auth_client_id`` and ``auth_client_secret`` is sufficient for authentication (https://github.com/ansible-collections/community.general/pull/10231). diff --git a/changelogs/fragments/10267-add-cloudflare-ptr-record-support.yml b/changelogs/fragments/10267-add-cloudflare-ptr-record-support.yml new file mode 100644 index 0000000000..29d71ca393 --- /dev/null +++ b/changelogs/fragments/10267-add-cloudflare-ptr-record-support.yml @@ -0,0 +1,2 @@ +minor_changes: + - cloudflare_dns - adds support for PTR records (https://github.com/ansible-collections/community.general/pull/10267). 
diff --git a/changelogs/fragments/10269-cloudflare-dns-refactor.yml b/changelogs/fragments/10269-cloudflare-dns-refactor.yml new file mode 100644 index 0000000000..9f91040d63 --- /dev/null +++ b/changelogs/fragments/10269-cloudflare-dns-refactor.yml @@ -0,0 +1,2 @@ +minor_changes: + - cloudflare_dns - simplify validations and refactor some code, no functional changes (https://github.com/ansible-collections/community.general/pull/10269). diff --git a/changelogs/fragments/10271--disable_lookups.yml b/changelogs/fragments/10271--disable_lookups.yml new file mode 100644 index 0000000000..d28e2ac833 --- /dev/null +++ b/changelogs/fragments/10271--disable_lookups.yml @@ -0,0 +1,3 @@ +bugfixes: + - "icinga2 inventory plugin - avoid using deprecated option when templating options (https://github.com/ansible-collections/community.general/pull/10271)." + - "linode inventory plugin - avoid using deprecated option when templating options (https://github.com/ansible-collections/community.general/pull/10271)." diff --git a/changelogs/fragments/10285-fstr-plugins.yml b/changelogs/fragments/10285-fstr-plugins.yml new file mode 100644 index 0000000000..6fff590fee --- /dev/null +++ b/changelogs/fragments/10285-fstr-plugins.yml @@ -0,0 +1,7 @@ +minor_changes: + - dense callback plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285). + - mail callback plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285). + - wsl connection plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285). + - jc filter plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285). + - iocage inventory plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285). 
+ - xen_orchestra inventory plugin - use f-strings instead of concatenation (https://github.com/ansible-collections/community.general/pull/10285). diff --git a/changelogs/fragments/10299-github_app_access_token-lookup.yml b/changelogs/fragments/10299-github_app_access_token-lookup.yml new file mode 100644 index 0000000000..59233e2a05 --- /dev/null +++ b/changelogs/fragments/10299-github_app_access_token-lookup.yml @@ -0,0 +1,2 @@ +minor_changes: + - github_app_access_token lookup plugin - support both ``jwt`` and ``pyjwt`` to avoid conflict with other modules requirements (https://github.com/ansible-collections/community.general/issues/10299). diff --git a/changelogs/fragments/10311-xfconf-refactor.yml b/changelogs/fragments/10311-xfconf-refactor.yml new file mode 100644 index 0000000000..9d71bd17d8 --- /dev/null +++ b/changelogs/fragments/10311-xfconf-refactor.yml @@ -0,0 +1,2 @@ +minor_changes: + - xfconf - minor adjustments to the code (https://github.com/ansible-collections/community.general/pull/10311). diff --git a/changelogs/fragments/10323-nmcli-improvements.yml b/changelogs/fragments/10323-nmcli-improvements.yml new file mode 100644 index 0000000000..53436ea7d6 --- /dev/null +++ b/changelogs/fragments/10323-nmcli-improvements.yml @@ -0,0 +1,2 @@ +minor_changes: + - nmcli - simplify validations and refactor some code, no functional changes (https://github.com/ansible-collections/community.general/pull/10323). diff --git a/changelogs/fragments/10328-redundant-brackets.yml b/changelogs/fragments/10328-redundant-brackets.yml new file mode 100644 index 0000000000..f8f74a336c --- /dev/null +++ b/changelogs/fragments/10328-redundant-brackets.yml @@ -0,0 +1,32 @@ +minor_changes: + - logstash callback plugin - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). 
+ - keycloak module utils - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - python_runner module utils - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - cloudflare_dns - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - crypttab - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - datadog_monitor - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - gitlab_deploy_key - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - gitlab_group_access_token - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - gitlab_hook - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - gitlab_project_access_token - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - gitlab_runner - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - ipa_group - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - jenkins_build - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). 
+ - jenkins_build_info - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - nmcli - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - oneandone_firewall_policy - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - oneandone_load_balancer - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - oneandone_monitoring_policy - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - onepassword_info - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - osx_defaults - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - ovh_ip_loadbalancing_backend - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - packet_device - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - pagerduty - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - pingdom - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - rhevm - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). 
+ - rocketchat - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - sensu_silence - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - sl_vm - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - urpmi - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - xattr - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). + - xml - remove redundant brackets in conditionals, no functional changes (https://github.com/ansible-collections/community.general/pull/10328). diff --git a/changelogs/fragments/10329-catapult-deprecation.yml b/changelogs/fragments/10329-catapult-deprecation.yml new file mode 100644 index 0000000000..5e5209edda --- /dev/null +++ b/changelogs/fragments/10329-catapult-deprecation.yml @@ -0,0 +1,2 @@ +deprecated_features: + - catapult - module is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/issues/10318, https://github.com/ansible-collections/community.general/pull/10329). diff --git a/changelogs/fragments/10339-github_app_access_token.yml b/changelogs/fragments/10339-github_app_access_token.yml new file mode 100644 index 0000000000..00cd71f559 --- /dev/null +++ b/changelogs/fragments/10339-github_app_access_token.yml @@ -0,0 +1,2 @@ +bugfixes: + - github_release - support multiple types of GitHub tokens; no longer failing when ``ghs_`` token type is provided (https://github.com/ansible-collections/community.general/issues/10338, https://github.com/ansible-collections/community.general/pull/10339). 
\ No newline at end of file diff --git a/changelogs/fragments/10346-jenkins-plugins-fixes.yml b/changelogs/fragments/10346-jenkins-plugins-fixes.yml new file mode 100644 index 0000000000..382fe7aa53 --- /dev/null +++ b/changelogs/fragments/10346-jenkins-plugins-fixes.yml @@ -0,0 +1,6 @@ +bugfixes: + - "jenkins_plugin - install latest compatible version instead of latest (https://github.com/ansible-collections/community.general/issues/854, https://github.com/ansible-collections/community.general/pull/10346)." + - "jenkins_plugin - separate Jenkins and external URL credentials (https://github.com/ansible-collections/community.general/issues/4419, https://github.com/ansible-collections/community.general/pull/10346)." + +minor_changes: + - "jenkins_plugin - install dependencies for specific version (https://github.com/ansible-collections/community.general/issues/4995, https://github.com/ansible-collections/community.general/pull/10346)." diff --git a/changelogs/fragments/10349-incus_connection-error-handling.yml b/changelogs/fragments/10349-incus_connection-error-handling.yml new file mode 100644 index 0000000000..b35da354d2 --- /dev/null +++ b/changelogs/fragments/10349-incus_connection-error-handling.yml @@ -0,0 +1,2 @@ +bugfixes: + - incus connection plugin - fix error handling to return more useful Ansible errors to the user (https://github.com/ansible-collections/community.general/issues/10344, https://github.com/ansible-collections/community.general/pull/10349). diff --git a/changelogs/fragments/10359-dependent.yml b/changelogs/fragments/10359-dependent.yml new file mode 100644 index 0000000000..e48a6142e8 --- /dev/null +++ b/changelogs/fragments/10359-dependent.yml @@ -0,0 +1,2 @@ +bugfixes: + - "dependent lookup plugin - avoid deprecated ansible-core 2.19 functionality (https://github.com/ansible-collections/community.general/pull/10359)." 
diff --git a/changelogs/fragments/10413-pacemaker-resource-cleanup.yml b/changelogs/fragments/10413-pacemaker-resource-cleanup.yml new file mode 100644 index 0000000000..f4157559cc --- /dev/null +++ b/changelogs/fragments/10413-pacemaker-resource-cleanup.yml @@ -0,0 +1,3 @@ +minor_changes: + - pacemaker_resource - add ``state=cleanup`` for cleaning up pacemaker resources (https://github.com/ansible-collections/community.general/pull/10413). + - pacemaker_resource - the parameter ``name`` is no longer a required parameter in community.general 11.3.0 (https://github.com/ansible-collections/community.general/pull/10413). diff --git a/changelogs/fragments/10415-keycloak-realm-brute-force-attributes.yml b/changelogs/fragments/10415-keycloak-realm-brute-force-attributes.yml new file mode 100644 index 0000000000..22433b584e --- /dev/null +++ b/changelogs/fragments/10415-keycloak-realm-brute-force-attributes.yml @@ -0,0 +1,2 @@ +minor_changes: + - keycloak_realm - add support for ``brute_force_strategy`` and ``max_temporary_lockouts`` (https://github.com/ansible-collections/community.general/issues/10412, https://github.com/ansible-collections/community.general/pull/10415). diff --git a/changelogs/fragments/10417-sysrc-refactor.yml b/changelogs/fragments/10417-sysrc-refactor.yml new file mode 100644 index 0000000000..b1b5db632b --- /dev/null +++ b/changelogs/fragments/10417-sysrc-refactor.yml @@ -0,0 +1,4 @@ +minor_changes: + - sysrc - adjustments to the code (https://github.com/ansible-collections/community.general/pull/10417). +bugfixes: + - sysrc - fix parsing with multi-line variables (https://github.com/ansible-collections/community.general/issues/10394, https://github.com/ansible-collections/community.general/pull/10417). 
\ No newline at end of file diff --git a/changelogs/fragments/10422-tasks_only-result_format.yml b/changelogs/fragments/10422-tasks_only-result_format.yml new file mode 100644 index 0000000000..13e5e749bf --- /dev/null +++ b/changelogs/fragments/10422-tasks_only-result_format.yml @@ -0,0 +1,2 @@ +minor_changes: + - "tasks_only callback plugin - add ``result_format`` and ``pretty_results`` options similarly to the default callback (https://github.com/ansible-collections/community.general/pull/10422)." diff --git a/changelogs/fragments/10423-apache_module-condition.yml b/changelogs/fragments/10423-apache_module-condition.yml new file mode 100644 index 0000000000..9a30d06b4e --- /dev/null +++ b/changelogs/fragments/10423-apache_module-condition.yml @@ -0,0 +1,2 @@ +bugfixes: + - apache2_module - check the ``cgi`` module restrictions only during activation (https://github.com/ansible-collections/community.general/pull/10423). diff --git a/changelogs/fragments/10424-scaleway-update-zones.yml b/changelogs/fragments/10424-scaleway-update-zones.yml new file mode 100644 index 0000000000..ffa508cd3a --- /dev/null +++ b/changelogs/fragments/10424-scaleway-update-zones.yml @@ -0,0 +1,2 @@ +minor_changes: + - scaleway_* modules, scaleway inventory plugin - update available zones and API URLs (https://github.com/ansible-collections/community.general/issues/10383, https://github.com/ansible-collections/community.general/pull/10424). \ No newline at end of file diff --git a/changelogs/fragments/10434-cpanm-deprecate-compat-mode.yml b/changelogs/fragments/10434-cpanm-deprecate-compat-mode.yml new file mode 100644 index 0000000000..84b6ecf471 --- /dev/null +++ b/changelogs/fragments/10434-cpanm-deprecate-compat-mode.yml @@ -0,0 +1,2 @@ +deprecated_features: + - cpanm - deprecate ``mode=compatibility``, ``mode=new`` should be used instead (https://github.com/ansible-collections/community.general/pull/10434). 
diff --git a/changelogs/fragments/10435-github-repo-deprecate-force-defaults.yml b/changelogs/fragments/10435-github-repo-deprecate-force-defaults.yml new file mode 100644 index 0000000000..cccb3a4c5f --- /dev/null +++ b/changelogs/fragments/10435-github-repo-deprecate-force-defaults.yml @@ -0,0 +1,2 @@ +deprecated_features: + - github_repo - deprecate ``force_defaults=true`` (https://github.com/ansible-collections/community.general/pull/10435). diff --git a/changelogs/fragments/10442-apk-fix-empty-names.yml b/changelogs/fragments/10442-apk-fix-empty-names.yml new file mode 100644 index 0000000000..24d68b52df --- /dev/null +++ b/changelogs/fragments/10442-apk-fix-empty-names.yml @@ -0,0 +1,3 @@ +bugfixes: + - apk - handle empty name strings properly + (https://github.com/ansible-collections/community.general/issues/10441, https://github.com/ansible-collections/community.general/pull/10442). \ No newline at end of file diff --git a/changelogs/fragments/10445-cronvar-reject-empty-values.yml b/changelogs/fragments/10445-cronvar-reject-empty-values.yml new file mode 100644 index 0000000000..1bf39619cc --- /dev/null +++ b/changelogs/fragments/10445-cronvar-reject-empty-values.yml @@ -0,0 +1,2 @@ +bugfixes: + - "cronvar - handle empty strings on ``value`` properly (https://github.com/ansible-collections/community.general/issues/10439, https://github.com/ansible-collections/community.general/pull/10445)." diff --git a/changelogs/fragments/10455-capabilities-improve-error-detection.yml b/changelogs/fragments/10455-capabilities-improve-error-detection.yml new file mode 100644 index 0000000000..40337a424b --- /dev/null +++ b/changelogs/fragments/10455-capabilities-improve-error-detection.yml @@ -0,0 +1,2 @@ +bugfixes: + - capabilities - using invalid path (symlink/directory/...) returned unrelated and incoherent error messages (https://github.com/ansible-collections/community.general/issues/5649, https://github.com/ansible-collections/community.general/pull/10455). 
\ No newline at end of file diff --git a/changelogs/fragments/10458-listen_port_facts-prevent-type-error.yml b/changelogs/fragments/10458-listen_port_facts-prevent-type-error.yml new file mode 100644 index 0000000000..70af0932b3 --- /dev/null +++ b/changelogs/fragments/10458-listen_port_facts-prevent-type-error.yml @@ -0,0 +1,2 @@ +bugfixes: + - "listen_port_facts - avoid crash when required commands are missing (https://github.com/ansible-collections/community.general/issues/10457, https://github.com/ansible-collections/community.general/pull/10458)." \ No newline at end of file diff --git a/changelogs/fragments/10459-deprecations.yml b/changelogs/fragments/10459-deprecations.yml new file mode 100644 index 0000000000..4b3f317454 --- /dev/null +++ b/changelogs/fragments/10459-deprecations.yml @@ -0,0 +1,6 @@ +bugfixes: + - "apache2_module - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)." + - "htpasswd - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)." + - "syspatch - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)." + - "sysupgrade - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)." + - "zypper_repository - avoid ansible-core 2.19 deprecation (https://github.com/ansible-collections/community.general/pull/10459)." diff --git a/changelogs/fragments/10461-cronvar-non-existent-dir-crash-fix.yml b/changelogs/fragments/10461-cronvar-non-existent-dir-crash-fix.yml new file mode 100644 index 0000000000..c4b77299f5 --- /dev/null +++ b/changelogs/fragments/10461-cronvar-non-existent-dir-crash-fix.yml @@ -0,0 +1,2 @@ +bugfixes: + - "cronvar - fix crash on missing ``cron_file`` parent directories (https://github.com/ansible-collections/community.general/issues/10460, https://github.com/ansible-collections/community.general/pull/10461)." 
diff --git a/changelogs/fragments/10483-sensu-subscription-quotes.yml b/changelogs/fragments/10483-sensu-subscription-quotes.yml new file mode 100644 index 0000000000..355099684c --- /dev/null +++ b/changelogs/fragments/10483-sensu-subscription-quotes.yml @@ -0,0 +1,2 @@ +minor_changes: + - sensu_subscription - normalize quotes in the module output (https://github.com/ansible-collections/community.general/pull/10483). diff --git a/changelogs/fragments/10490-rocketchat.yml b/changelogs/fragments/10490-rocketchat.yml new file mode 100644 index 0000000000..73657ba67c --- /dev/null +++ b/changelogs/fragments/10490-rocketchat.yml @@ -0,0 +1,3 @@ +deprecated_features: + - "rocketchat - the default value for ``is_pre740``, currently ``true``, is deprecated and will change to ``false`` in community.general 13.0.0 + (https://github.com/ansible-collections/community.general/pull/10490)." diff --git a/changelogs/fragments/10491-irc.yml b/changelogs/fragments/10491-irc.yml new file mode 100644 index 0000000000..74867e71a7 --- /dev/null +++ b/changelogs/fragments/10491-irc.yml @@ -0,0 +1,2 @@ +bugfixes: + - "irc - pass hostname to ``wrap_socket()`` if ``use_tls=true`` and ``validate_certs=true`` (https://github.com/ansible-collections/community.general/issues/10472, https://github.com/ansible-collections/community.general/pull/10491)." diff --git a/changelogs/fragments/10493-nagios-services.yml b/changelogs/fragments/10493-nagios-services.yml new file mode 100644 index 0000000000..3a04556c68 --- /dev/null +++ b/changelogs/fragments/10493-nagios-services.yml @@ -0,0 +1,2 @@ +minor_changes: + - nagios - make parameter ``services`` a ``list`` instead of a ``str`` (https://github.com/ansible-collections/community.general/pull/10493). 
diff --git a/changelogs/fragments/10494-rfdn-1.yml b/changelogs/fragments/10494-rfdn-1.yml new file mode 100644 index 0000000000..09a0c442b0 --- /dev/null +++ b/changelogs/fragments/10494-rfdn-1.yml @@ -0,0 +1,27 @@ +minor_changes: + - aerospike_migrations - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - airbrake_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - bigpanda - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - bootc_manage - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - bower - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - btrfs_subvolume - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - bundler - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - campfire - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - cargo - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - catapult - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - cisco_webex - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - consul_kv - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). 
+ - consul_policy - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - copr - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - datadog_downtime - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - datadog_monitor - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - dconf - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - dimensiondata_network - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - dimensiondata_vlan - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - dnf_config_manager - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - dnsmadeeasy - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - dpkg_divert - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - easy_install - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - elasticsearch_plugin - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - facter - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). + - filesystem - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10494). 
diff --git a/changelogs/fragments/10505-rfdn-2.yml b/changelogs/fragments/10505-rfdn-2.yml new file mode 100644 index 0000000000..89aeab9356 --- /dev/null +++ b/changelogs/fragments/10505-rfdn-2.yml @@ -0,0 +1,39 @@ +minor_changes: + - gem - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - git_config_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - github_deploy_key - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - github_repo - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - github_webhook - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - github_webhook_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_branch - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_group_access_token - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_group_variable - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_hook - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_instance_variable - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_issue - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). 
+ - gitlab_label - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_merge_request - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_milestone - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_project - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_project_access_token - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - gitlab_project_variable - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - grove - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - hg - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - homebrew - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - homebrew_cask - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - homebrew_tap - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - honeybadger_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - htpasswd - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - icinga2_host - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). 
+ - influxdb_user - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - ini_file - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - ipa_dnsrecord - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - ipa_dnszone - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - ipa_service - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - ipbase_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - ipwcli_dns - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - irc - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - jabber - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - jenkins_credential - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - jenkins_job - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). + - jenkins_script - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10505). 
diff --git a/changelogs/fragments/10507-rfdn-3.yml b/changelogs/fragments/10507-rfdn-3.yml new file mode 100644 index 0000000000..fae9d118bc --- /dev/null +++ b/changelogs/fragments/10507-rfdn-3.yml @@ -0,0 +1,35 @@ +minor_changes: + - keycloak_authz_authorization_scope - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - keycloak_authz_permission - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - keycloak_role - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - keycloak_userprofile - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - keyring - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - kibana_plugin - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - layman - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - ldap_attrs - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - ldap_inc - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - librato_annotation - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - lldp - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - logentries - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). 
+ - lxca_cmms - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - lxca_nodes - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - macports - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - mail - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - manageiq_alerts - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - manageiq_group - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - manageiq_policies - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - manageiq_policies_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - manageiq_tags - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - manageiq_tenant - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - matrix - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - mattermost - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - maven_artifact - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - memset_dns_reload - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). 
+ - memset_zone - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - memset_zone_record - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - mqtt - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - mssql_db - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - mssql_script - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - netcup_dns - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - newrelic_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). + - nsupdate - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10507). diff --git a/changelogs/fragments/10512-rfdn-4.yml b/changelogs/fragments/10512-rfdn-4.yml new file mode 100644 index 0000000000..6d8f9e7d77 --- /dev/null +++ b/changelogs/fragments/10512-rfdn-4.yml @@ -0,0 +1,42 @@ +minor_changes: + - oci_vcn - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - one_image_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - one_template - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - one_vnet - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). 
+ - onepassword_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - oneview_fc_network_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - opendj_backendprop - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - ovh_monthly_billing - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pagerduty - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pagerduty_change - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pagerduty_user - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pam_limits - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pear - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pkgng - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pnpm - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - portage - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pritunl_org - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pritunl_org_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). 
+ - pritunl_user - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pritunl_user_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pubnub_blocks - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pushbullet - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - pushover - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - redis_data - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - redis_data_incr - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - riak - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - rocketchat - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - rollbar_deployment - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - say - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - scaleway_database_backup - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - sendgrid - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - sensu_silence - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). 
+ - sorcery - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - ssh_config - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - statusio_maintenance - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - svr4pkg - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - swdepot - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - syslogger - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - sysrc - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - systemd_creds_decrypt - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). + - systemd_creds_encrypt - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10512). diff --git a/changelogs/fragments/10513-rfdn-5.yml b/changelogs/fragments/10513-rfdn-5.yml new file mode 100644 index 0000000000..d930d7345c --- /dev/null +++ b/changelogs/fragments/10513-rfdn-5.yml @@ -0,0 +1,18 @@ +minor_changes: + - taiga_issue - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - twilio - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_aaa_group - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). 
+ - utm_ca_host_key_cert - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_dns_host - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_network_interface_address - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_proxy_auth_profile - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_proxy_exception - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_proxy_frontend - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - utm_proxy_location - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - vertica_configuration - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - vertica_info - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - vertica_role - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - xbps - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - yarn - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - zypper - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). + - zypper_repository - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10513). 
diff --git a/changelogs/fragments/10514-deprecate-bearychat.yml b/changelogs/fragments/10514-deprecate-bearychat.yml new file mode 100644 index 0000000000..202210ac8c --- /dev/null +++ b/changelogs/fragments/10514-deprecate-bearychat.yml @@ -0,0 +1,2 @@ +deprecated_features: + - bearychat - module is deprecated and will be removed in community.general 12.0.0 (https://github.com/ansible-collections/community.general/issues/10514). diff --git a/changelogs/fragments/10520-arg-runcommand-list.yml b/changelogs/fragments/10520-arg-runcommand-list.yml new file mode 100644 index 0000000000..4479b3a694 --- /dev/null +++ b/changelogs/fragments/10520-arg-runcommand-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - apk - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/issues/10479, https://github.com/ansible-collections/community.general/pull/10520). diff --git a/changelogs/fragments/10523-bzr-cmd-list.yml b/changelogs/fragments/10523-bzr-cmd-list.yml new file mode 100644 index 0000000000..fb6c8a6c47 --- /dev/null +++ b/changelogs/fragments/10523-bzr-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - bzr - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10523). diff --git a/changelogs/fragments/10524-capabilities-cmd-list.yml b/changelogs/fragments/10524-capabilities-cmd-list.yml new file mode 100644 index 0000000000..e6af832b5c --- /dev/null +++ b/changelogs/fragments/10524-capabilities-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - capabilities - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10524). 
diff --git a/changelogs/fragments/10525-composer-cmd-list.yml b/changelogs/fragments/10525-composer-cmd-list.yml new file mode 100644 index 0000000000..a2aebc8a6d --- /dev/null +++ b/changelogs/fragments/10525-composer-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - composer - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10525). diff --git a/changelogs/fragments/10526-easy-install-cmd-list.yml b/changelogs/fragments/10526-easy-install-cmd-list.yml new file mode 100644 index 0000000000..6fa6717adc --- /dev/null +++ b/changelogs/fragments/10526-easy-install-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - easy_install - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10526). diff --git a/changelogs/fragments/10527-keycloak-idp-well-known-url-support.yml b/changelogs/fragments/10527-keycloak-idp-well-known-url-support.yml new file mode 100644 index 0000000000..cc2ae7efa0 --- /dev/null +++ b/changelogs/fragments/10527-keycloak-idp-well-known-url-support.yml @@ -0,0 +1,2 @@ +minor_changes: + - keycloak_identity_provider - add support for ``fromUrl`` to automatically fetch OIDC endpoints from the well-known discovery URL, simplifying identity provider configuration (https://github.com/ansible-collections/community.general/pull/10527). \ No newline at end of file diff --git a/changelogs/fragments/10531-wsl-paramiko.yml b/changelogs/fragments/10531-wsl-paramiko.yml new file mode 100644 index 0000000000..08257d6c78 --- /dev/null +++ b/changelogs/fragments/10531-wsl-paramiko.yml @@ -0,0 +1,3 @@ +bugfixes: + - "wsl connection plugin - avoid deprecated ansible-core paramiko import helper, import paramiko directly instead + (https://github.com/ansible-collections/community.general/issues/10515, https://github.com/ansible-collections/community.general/pull/10531)."
diff --git a/changelogs/fragments/10532-apk.yml b/changelogs/fragments/10532-apk.yml new file mode 100644 index 0000000000..84c5d985e8 --- /dev/null +++ b/changelogs/fragments/10532-apk.yml @@ -0,0 +1,2 @@ +bugfixes: + - "apk - fix check for empty/whitespace-only package names (https://github.com/ansible-collections/community.general/pull/10532)." diff --git a/changelogs/fragments/10536-imgadm-cmd-list.yml b/changelogs/fragments/10536-imgadm-cmd-list.yml new file mode 100644 index 0000000000..0f22c774d8 --- /dev/null +++ b/changelogs/fragments/10536-imgadm-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - imgadm - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10536). diff --git a/changelogs/fragments/10538-keycloak-realm-add-support-client-options.yml b/changelogs/fragments/10538-keycloak-realm-add-support-client-options.yml new file mode 100644 index 0000000000..66333b01a8 --- /dev/null +++ b/changelogs/fragments/10538-keycloak-realm-add-support-client-options.yml @@ -0,0 +1,2 @@ +minor_changes: + - keycloak_realm - add support for client-related options and Oauth2 device (https://github.com/ansible-collections/community.general/pull/10538). \ No newline at end of file diff --git a/changelogs/fragments/10539-json_query.yml b/changelogs/fragments/10539-json_query.yml new file mode 100644 index 0000000000..7e84b7ecb0 --- /dev/null +++ b/changelogs/fragments/10539-json_query.yml @@ -0,0 +1,2 @@ +bugfixes: + - "json_query filter plugin - make compatible with lazy evaluation list and dictionary types of ansible-core 2.19 (https://github.com/ansible-collections/community.general/pull/10539)." 
diff --git a/changelogs/fragments/10566-merge_variables.yml b/changelogs/fragments/10566-merge_variables.yml new file mode 100644 index 0000000000..c0de6dd845 --- /dev/null +++ b/changelogs/fragments/10566-merge_variables.yml @@ -0,0 +1,2 @@ +bugfixes: + - "merge_variables lookup plugin - avoid deprecated functionality from ansible-core 2.19 (https://github.com/ansible-collections/community.general/pull/10566)." diff --git a/changelogs/fragments/10573-logstash-plugin-cmd-list.yml b/changelogs/fragments/10573-logstash-plugin-cmd-list.yml new file mode 100644 index 0000000000..441c1c49a3 --- /dev/null +++ b/changelogs/fragments/10573-logstash-plugin-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - logstash_plugin - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/issues/10479, https://github.com/ansible-collections/community.general/pull/10573). diff --git a/changelogs/fragments/10574-django-runner.yml b/changelogs/fragments/10574-django-runner.yml new file mode 100644 index 0000000000..a0bf6ec6d4 --- /dev/null +++ b/changelogs/fragments/10574-django-runner.yml @@ -0,0 +1,2 @@ +minor_changes: + - django module utils - remove deprecated parameter ``_DjangoRunner`` call (https://github.com/ansible-collections/community.general/pull/10574). diff --git a/changelogs/fragments/10599-open-iscsi-cmd-list.yml b/changelogs/fragments/10599-open-iscsi-cmd-list.yml new file mode 100644 index 0000000000..f8ef659ee9 --- /dev/null +++ b/changelogs/fragments/10599-open-iscsi-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - open_iscsi - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10599).
diff --git a/changelogs/fragments/10601-pear-cmd-list.yml b/changelogs/fragments/10601-pear-cmd-list.yml new file mode 100644 index 0000000000..d5ab2d3d0e --- /dev/null +++ b/changelogs/fragments/10601-pear-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - pear - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10601). diff --git a/changelogs/fragments/10602-portage-cmd-list.yml b/changelogs/fragments/10602-portage-cmd-list.yml new file mode 100644 index 0000000000..36b6711e00 --- /dev/null +++ b/changelogs/fragments/10602-portage-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - portage - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10602). diff --git a/changelogs/fragments/10603-riak-cmd-list.yml b/changelogs/fragments/10603-riak-cmd-list.yml new file mode 100644 index 0000000000..1a29a07c7f --- /dev/null +++ b/changelogs/fragments/10603-riak-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - riak - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10603). diff --git a/changelogs/fragments/10604-solaris-zone-cmd-list.yml b/changelogs/fragments/10604-solaris-zone-cmd-list.yml new file mode 100644 index 0000000000..2fe52cbf31 --- /dev/null +++ b/changelogs/fragments/10604-solaris-zone-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - solaris_zone - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10604). diff --git a/changelogs/fragments/10605-swupd-cmd-list.yml b/changelogs/fragments/10605-swupd-cmd-list.yml new file mode 100644 index 0000000000..23669d7974 --- /dev/null +++ b/changelogs/fragments/10605-swupd-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - swupd - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10605). 
diff --git a/changelogs/fragments/10606-urpmi-cmd-list.yml b/changelogs/fragments/10606-urpmi-cmd-list.yml new file mode 100644 index 0000000000..a7a2e54a1e --- /dev/null +++ b/changelogs/fragments/10606-urpmi-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - urpmi - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10606). diff --git a/changelogs/fragments/10608-xbps-cmd-list.yml b/changelogs/fragments/10608-xbps-cmd-list.yml new file mode 100644 index 0000000000..ff951a4520 --- /dev/null +++ b/changelogs/fragments/10608-xbps-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - xbps - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10608). diff --git a/changelogs/fragments/10609-xfs-quota-cmd-list.yml b/changelogs/fragments/10609-xfs-quota-cmd-list.yml new file mode 100644 index 0000000000..74e170ef09 --- /dev/null +++ b/changelogs/fragments/10609-xfs-quota-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - xfs_quota - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10609). diff --git a/changelogs/fragments/10612-timezone-cmd-list.yml b/changelogs/fragments/10612-timezone-cmd-list.yml new file mode 100644 index 0000000000..601375fbc5 --- /dev/null +++ b/changelogs/fragments/10612-timezone-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - timezone - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10612). diff --git a/changelogs/fragments/10642-parted-cmd-list.yml b/changelogs/fragments/10642-parted-cmd-list.yml new file mode 100644 index 0000000000..29025512dd --- /dev/null +++ b/changelogs/fragments/10642-parted-cmd-list.yml @@ -0,0 +1,2 @@ +minor_changes: + - parted - using safer mechanism to run external command (https://github.com/ansible-collections/community.general/pull/10642). 
diff --git a/changelogs/fragments/10644-oneview-os.yml b/changelogs/fragments/10644-oneview-os.yml new file mode 100644 index 0000000000..f2789cf5fc --- /dev/null +++ b/changelogs/fragments/10644-oneview-os.yml @@ -0,0 +1,2 @@ +breaking_changes: + - oneview module utils - remove import of standard library ``os`` (https://github.com/ansible-collections/community.general/pull/10644). diff --git a/changelogs/fragments/10646-scaleway_container_cpu_limit.yml b/changelogs/fragments/10646-scaleway_container_cpu_limit.yml new file mode 100644 index 0000000000..f23a1bb96d --- /dev/null +++ b/changelogs/fragments/10646-scaleway_container_cpu_limit.yml @@ -0,0 +1,2 @@ +minor_changes: + - scaleway_container - add a ``cpu_limit`` argument (https://github.com/ansible-collections/community.general/pull/10646). diff --git a/changelogs/fragments/10647-scaleway-module-defaults.yml b/changelogs/fragments/10647-scaleway-module-defaults.yml new file mode 100644 index 0000000000..7fca7a171a --- /dev/null +++ b/changelogs/fragments/10647-scaleway-module-defaults.yml @@ -0,0 +1,2 @@ +minor_changes: + - scaleway modules - add a ``scaleway`` group to use ``module_defaults`` (https://github.com/ansible-collections/community.general/pull/10647). diff --git a/changelogs/fragments/10652-oracle-deprecation.yml b/changelogs/fragments/10652-oracle-deprecation.yml new file mode 100644 index 0000000000..3842e994f8 --- /dev/null +++ b/changelogs/fragments/10652-oracle-deprecation.yml @@ -0,0 +1,4 @@ +deprecated_features: + - oci_utils module utils - utils is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/issues/10318, https://github.com/ansible-collections/community.general/pull/10652). + - oci_vcn - module is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/issues/10318, https://github.com/ansible-collections/community.general/pull/10652). 
+ - oracle* doc fragments - fragments are deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/issues/10318, https://github.com/ansible-collections/community.general/pull/10652). diff --git a/changelogs/fragments/10661-support-gpg-auto-impor-keys-in-zypper.yml b/changelogs/fragments/10661-support-gpg-auto-impor-keys-in-zypper.yml new file mode 100644 index 0000000000..333121902f --- /dev/null +++ b/changelogs/fragments/10661-support-gpg-auto-impor-keys-in-zypper.yml @@ -0,0 +1,2 @@ +minor_changes: + - zypper - support the ``--gpg-auto-import-keys`` option in zypper (https://github.com/ansible-collections/community.general/issues/10660, https://github.com/ansible-collections/community.general/pull/10661). diff --git a/changelogs/fragments/10663-pacemaker-resource-fix-resource-type.yml b/changelogs/fragments/10663-pacemaker-resource-fix-resource-type.yml new file mode 100644 index 0000000000..270488d248 --- /dev/null +++ b/changelogs/fragments/10663-pacemaker-resource-fix-resource-type.yml @@ -0,0 +1,2 @@ +bugfixes: + - "pacemaker_resource - fix ``resource_type`` parameter formatting (https://github.com/ansible-collections/community.general/issues/10426, https://github.com/ansible-collections/community.general/pull/10663)." diff --git a/changelogs/fragments/10665-pacemaker-resource-clone.yml b/changelogs/fragments/10665-pacemaker-resource-clone.yml new file mode 100644 index 0000000000..c24420c598 --- /dev/null +++ b/changelogs/fragments/10665-pacemaker-resource-clone.yml @@ -0,0 +1,2 @@ +minor_changes: + - pacemaker_resource - add ``state=cloned`` for cloning pacemaker resources or groups (https://github.com/ansible-collections/community.general/issues/10322, https://github.com/ansible-collections/community.general/pull/10665). 
diff --git a/changelogs/fragments/10679-gitlab-access-token-add-planner-role.yml b/changelogs/fragments/10679-gitlab-access-token-add-planner-role.yml new file mode 100644 index 0000000000..65aeae2a86 --- /dev/null +++ b/changelogs/fragments/10679-gitlab-access-token-add-planner-role.yml @@ -0,0 +1,3 @@ +minor_changes: + - gitlab_group_access_token - add ``planner`` access level (https://github.com/ansible-collections/community.general/pull/10679). + - gitlab_project_access_token - add ``planner`` access level (https://github.com/ansible-collections/community.general/pull/10679). diff --git a/changelogs/fragments/10684-django-improvements.yml b/changelogs/fragments/10684-django-improvements.yml new file mode 100644 index 0000000000..a8ca1cfbe9 --- /dev/null +++ b/changelogs/fragments/10684-django-improvements.yml @@ -0,0 +1,4 @@ +minor_changes: + - django module utils - simplify/consolidate the common settings for the command line (https://github.com/ansible-collections/community.general/pull/10684). + - django_check - simplify/consolidate the common settings for the command line (https://github.com/ansible-collections/community.general/pull/10684). + - django_createcachetable - simplify/consolidate the common settings for the command line (https://github.com/ansible-collections/community.general/pull/10684). diff --git a/changelogs/fragments/10687-deprecations.yml b/changelogs/fragments/10687-deprecations.yml new file mode 100644 index 0000000000..62974ab6a0 --- /dev/null +++ b/changelogs/fragments/10687-deprecations.yml @@ -0,0 +1,2 @@ +bugfixes: + - "Avoid deprecated functionality in ansible-core 2.20 (https://github.com/ansible-collections/community.general/pull/10687)." 
diff --git a/changelogs/fragments/10688-pids.yml b/changelogs/fragments/10688-pids.yml new file mode 100644 index 0000000000..1ed97a6fed --- /dev/null +++ b/changelogs/fragments/10688-pids.yml @@ -0,0 +1,2 @@ +bugfixes: + - "pids - prevent error when an empty string is provided for ``name`` (https://github.com/ansible-collections/community.general/issues/10672, https://github.com/ansible-collections/community.general/pull/10688)." diff --git a/changelogs/fragments/10689-gem-prevent-soundness-issue.yml b/changelogs/fragments/10689-gem-prevent-soundness-issue.yml new file mode 100644 index 0000000000..a55dba1ea1 --- /dev/null +++ b/changelogs/fragments/10689-gem-prevent-soundness-issue.yml @@ -0,0 +1,2 @@ +bugfixes: + - "gem - fix soundness issue when uninstalling default gems on Ubuntu (https://github.com/ansible-collections/community.general/issues/10451, https://github.com/ansible-collections/community.general/pull/10689)." \ No newline at end of file diff --git a/changelogs/fragments/10700-django-check-databases.yml b/changelogs/fragments/10700-django-check-databases.yml new file mode 100644 index 0000000000..cfb8897f6a --- /dev/null +++ b/changelogs/fragments/10700-django-check-databases.yml @@ -0,0 +1,2 @@ +minor_changes: + - django_check - rename parameter ``database`` to ``databases``, add alias for compatibility (https://github.com/ansible-collections/community.general/pull/10700). diff --git a/changelogs/fragments/10705-openbsd-pkg-remove-unused.yml b/changelogs/fragments/10705-openbsd-pkg-remove-unused.yml new file mode 100644 index 0000000000..2ceb1352b4 --- /dev/null +++ b/changelogs/fragments/10705-openbsd-pkg-remove-unused.yml @@ -0,0 +1,2 @@ +minor_changes: + - openbsd_pkg - add ``autoremove`` parameter to remove unused dependencies (https://github.com/ansible-collections/community.general/pull/10705). 
diff --git a/changelogs/fragments/10707-pacemaker-maintenance-mode-regex.yml b/changelogs/fragments/10707-pacemaker-maintenance-mode-regex.yml new file mode 100644 index 0000000000..ba5e08edd3 --- /dev/null +++ b/changelogs/fragments/10707-pacemaker-maintenance-mode-regex.yml @@ -0,0 +1,2 @@ +bugfixes: + - "pacemaker - use regex for matching ``maintenance-mode`` output to determine cluster maintenance status (https://github.com/ansible-collections/community.general/issues/10426, https://github.com/ansible-collections/community.general/pull/10707)." diff --git a/changelogs/fragments/10711-pytohn-idioms-1.yml b/changelogs/fragments/10711-pytohn-idioms-1.yml new file mode 100644 index 0000000000..18ae9db37b --- /dev/null +++ b/changelogs/fragments/10711-pytohn-idioms-1.yml @@ -0,0 +1,6 @@ +minor_changes: + - gitlab_label - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10711). + - gitlab_milestone - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10711). + - ipa_host - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10711). + - lvg_rename - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10711). + - terraform - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10711). diff --git a/changelogs/fragments/10712-python-idioms-2.yml b/changelogs/fragments/10712-python-idioms-2.yml new file mode 100644 index 0000000000..8d49f1f86f --- /dev/null +++ b/changelogs/fragments/10712-python-idioms-2.yml @@ -0,0 +1,7 @@ +minor_changes: + - iocage inventory plugin - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10712). + - manageiq - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10712). 
+ - android_sdk - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10712). + - elasticsearch_plugin - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10712). + - manageiq_alert_profiles - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10712). + - one_vm - minor refactor to improve readability (https://github.com/ansible-collections/community.general/pull/10712). diff --git a/changelogs/fragments/10727-python-idioms-3.yml b/changelogs/fragments/10727-python-idioms-3.yml new file mode 100644 index 0000000000..9b92b8bbef --- /dev/null +++ b/changelogs/fragments/10727-python-idioms-3.yml @@ -0,0 +1,10 @@ +minor_changes: + - filesize - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727). + - iptables_state - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727). + - manageiq_group - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727). + - manageiq_tenant - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727). + - mssql_db - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727). + - openbsd_pkg - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727). + - ufw - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727). + - xenserver_facts - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727). + - zfs_facts - minor refactor to simplify string formatting (https://github.com/ansible-collections/community.general/pull/10727). 
diff --git a/changelogs/fragments/10741-pacemaker-cluster-cleanup-deprecate.yml b/changelogs/fragments/10741-pacemaker-cluster-cleanup-deprecate.yml new file mode 100644 index 0000000000..4bb018a9c7 --- /dev/null +++ b/changelogs/fragments/10741-pacemaker-cluster-cleanup-deprecate.yml @@ -0,0 +1,2 @@ +deprecated_features: + - pacemaker_cluster - the state ``cleanup`` will be removed from community.general 14.0.0 (https://github.com/ansible-collections/community.general/pull/10741). diff --git a/changelogs/fragments/10743-monit-handle-unknown-status.yml b/changelogs/fragments/10743-monit-handle-unknown-status.yml new file mode 100644 index 0000000000..1c9fbb1101 --- /dev/null +++ b/changelogs/fragments/10743-monit-handle-unknown-status.yml @@ -0,0 +1,2 @@ +bugfixes: + - monit - fix crash caused by an unknown status value returned from the monit service (https://github.com/ansible-collections/community.general/issues/10742, https://github.com/ansible-collections/community.general/pull/10743). diff --git a/changelogs/fragments/10751-kdeconfig-support-kwriteconfig6.yml b/changelogs/fragments/10751-kdeconfig-support-kwriteconfig6.yml new file mode 100644 index 0000000000..716ffa35f1 --- /dev/null +++ b/changelogs/fragments/10751-kdeconfig-support-kwriteconfig6.yml @@ -0,0 +1,3 @@ +bugfixes: + - kdeconfig - ``kwriteconfig`` executable could not be discovered automatically on systems with only ``kwriteconfig6`` installed. + ``kwriteconfig6`` can now be discovered by Ansible (https://github.com/ansible-collections/community.general/issues/10746, https://github.com/ansible-collections/community.general/pull/10751). 
\ No newline at end of file diff --git a/changelogs/fragments/10752-selective-hardcoded-loop-var.yml b/changelogs/fragments/10752-selective-hardcoded-loop-var.yml new file mode 100644 index 0000000000..cfc6bdd9e9 --- /dev/null +++ b/changelogs/fragments/10752-selective-hardcoded-loop-var.yml @@ -0,0 +1,2 @@ +bugfixes: + - selective callback plugin - specify ``ansible_loop_var`` instead of the explicit value ``item`` when printing task result (https://github.com/ansible-collections/community.general/pull/10752). diff --git a/changelogs/fragments/10769-xenserver-rf.yml b/changelogs/fragments/10769-xenserver-rf.yml new file mode 100644 index 0000000000..2c31edf886 --- /dev/null +++ b/changelogs/fragments/10769-xenserver-rf.yml @@ -0,0 +1,2 @@ +minor_changes: + - xenserver module utils - remove redundant constructs from argument specs (https://github.com/ansible-collections/community.general/pull/10769). diff --git a/changelogs/fragments/10785-gitlab-token-add-missing-scopes.yml b/changelogs/fragments/10785-gitlab-token-add-missing-scopes.yml new file mode 100644 index 0000000000..a38d98a444 --- /dev/null +++ b/changelogs/fragments/10785-gitlab-token-add-missing-scopes.yml @@ -0,0 +1,3 @@ +minor_changes: + - gitlab_group_access_token - add missing scopes (https://github.com/ansible-collections/community.general/pull/10785). + - gitlab_project_access_token - add missing scopes (https://github.com/ansible-collections/community.general/pull/10785). diff --git a/changelogs/fragments/10787-gitlab-variable-support-masked-and-hidden-variables.yml b/changelogs/fragments/10787-gitlab-variable-support-masked-and-hidden-variables.yml new file mode 100644 index 0000000000..bbf5b6d9a5 --- /dev/null +++ b/changelogs/fragments/10787-gitlab-variable-support-masked-and-hidden-variables.yml @@ -0,0 +1,3 @@ +minor_changes: + - gitlab_group_variable - support masked-and-hidden variables (https://github.com/ansible-collections/community.general/pull/10787). 
+ - gitlab_project_variable - support masked-and-hidden variables (https://github.com/ansible-collections/community.general/pull/10787). diff --git a/changelogs/fragments/10795-gitlab_protected_branch-add-allow_force_push-code_owner_approval_required.yml b/changelogs/fragments/10795-gitlab_protected_branch-add-allow_force_push-code_owner_approval_required.yml new file mode 100644 index 0000000000..ed4d4d78e8 --- /dev/null +++ b/changelogs/fragments/10795-gitlab_protected_branch-add-allow_force_push-code_owner_approval_required.yml @@ -0,0 +1,3 @@ +minor_changes: + - gitlab_protected_branch - add ``allow_force_push``, ``code_owner_approval_required`` (https://github.com/ansible-collections/community.general/pull/10795, https://github.com/ansible-collections/community.general/issues/6432, https://github.com/ansible-collections/community.general/issues/10289, https://github.com/ansible-collections/community.general/issues/10765). + - gitlab_protected_branch - update protected branches if possible instead of recreating them (https://github.com/ansible-collections/community.general/pull/10795). diff --git a/changelogs/fragments/10796-rocketchat-force-content-type.yml b/changelogs/fragments/10796-rocketchat-force-content-type.yml new file mode 100644 index 0000000000..96ca116e62 --- /dev/null +++ b/changelogs/fragments/10796-rocketchat-force-content-type.yml @@ -0,0 +1,2 @@ +bugfixes: + - rocketchat - fix message delivery in Rocket Chat >= 7.5.3 by forcing ``Content-Type`` header to ``application/json`` instead of the default ``application/x-www-form-urlencoded`` (https://github.com/ansible-collections/community.general/issues/10796, https://github.com/ansible-collections/community.general/pull/10796). 
diff --git a/changelogs/fragments/10805-homebrew-support-old-names.yml b/changelogs/fragments/10805-homebrew-support-old-names.yml new file mode 100644 index 0000000000..43d5a1c8bf --- /dev/null +++ b/changelogs/fragments/10805-homebrew-support-old-names.yml @@ -0,0 +1,2 @@ +bugfixes: + - homebrew - do not fail when cask or formula name has changed in homebrew repo (https://github.com/ansible-collections/community.general/issues/10804, https://github.com/ansible-collections/community.general/pull/10805). \ No newline at end of file diff --git a/changelogs/fragments/10810-github_app_access_token-jwt.yml b/changelogs/fragments/10810-github_app_access_token-jwt.yml new file mode 100644 index 0000000000..804ab9fbaa --- /dev/null +++ b/changelogs/fragments/10810-github_app_access_token-jwt.yml @@ -0,0 +1,2 @@ +bugfixes: + - "github_app_access_token lookup plugin - fix compatibility imports for using jwt (https://github.com/ansible-collections/community.general/issues/10807, https://github.com/ansible-collections/community.general/pull/10810)." diff --git a/changelogs/fragments/10812-gitlab-variable-add-description.yml b/changelogs/fragments/10812-gitlab-variable-add-description.yml new file mode 100644 index 0000000000..1de0405aff --- /dev/null +++ b/changelogs/fragments/10812-gitlab-variable-add-description.yml @@ -0,0 +1,4 @@ +minor_changes: + - gitlab_group_variable - add ``description`` option (https://github.com/ansible-collections/community.general/pull/10812). + - gitlab_instance_variable - add ``description`` option (https://github.com/ansible-collections/community.general/pull/10812). + - gitlab_project_variable - add ``description`` option (https://github.com/ansible-collections/community.general/pull/10812, https://github.com/ansible-collections/community.general/issues/8584, https://github.com/ansible-collections/community.general/issues/10809). 
diff --git a/changelogs/fragments/10823-parted-fail-json-command.yml b/changelogs/fragments/10823-parted-fail-json-command.yml new file mode 100644 index 0000000000..8a52be589e --- /dev/null +++ b/changelogs/fragments/10823-parted-fail-json-command.yml @@ -0,0 +1,2 @@ +bugfixes: + - parted - variable is a list, not text (https://github.com/ansible-collections/community.general/pull/10823, https://github.com/ansible-collections/community.general/issues/10817). diff --git a/changelogs/fragments/10829-fix-keycloak-role-changed-status.yml b/changelogs/fragments/10829-fix-keycloak-role-changed-status.yml new file mode 100644 index 0000000000..8fd05ec182 --- /dev/null +++ b/changelogs/fragments/10829-fix-keycloak-role-changed-status.yml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_role - fixes an issue where the module incorrectly returns ``changed=true`` when using the alias ``clientId`` in composite roles (https://github.com/ansible-collections/community.general/pull/10829). \ No newline at end of file diff --git a/changelogs/fragments/10840-fix-keycloak-subgroup-search-realm.yml b/changelogs/fragments/10840-fix-keycloak-subgroup-search-realm.yml new file mode 100644 index 0000000000..3b7818ee3e --- /dev/null +++ b/changelogs/fragments/10840-fix-keycloak-subgroup-search-realm.yml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_group - fixes an issue where module ignores realm when searching subgroups by name (https://github.com/ansible-collections/community.general/pull/10840). 
\ No newline at end of file diff --git a/changelogs/fragments/10842-keycloak-client-scope-support.yml b/changelogs/fragments/10842-keycloak-client-scope-support.yml new file mode 100644 index 0000000000..80266fa43b --- /dev/null +++ b/changelogs/fragments/10842-keycloak-client-scope-support.yml @@ -0,0 +1,2 @@ +minor_changes: + - keycloak_client - add idempotent support for ``optional_client_scopes`` and ``optional_client_scopes``, and ensure consistent change detection between check mode and live run (https://github.com/ansible-collections/community.general/issues/5495, https://github.com/ansible-collections/community.general/pull/10842). \ No newline at end of file diff --git a/changelogs/fragments/10852-yaml.yml b/changelogs/fragments/10852-yaml.yml new file mode 100644 index 0000000000..1319b94ab5 --- /dev/null +++ b/changelogs/fragments/10852-yaml.yml @@ -0,0 +1,2 @@ +bugfixes: + - "yaml cache plugin - make compatible with ansible-core 2.19 (https://github.com/ansible-collections/community.general/issues/10849, https://github.com/ansible-collections/community.general/issues/10852)." diff --git a/changelogs/fragments/10857-github_deploy_key-err.yml b/changelogs/fragments/10857-github_deploy_key-err.yml new file mode 100644 index 0000000000..58bac31c5e --- /dev/null +++ b/changelogs/fragments/10857-github_deploy_key-err.yml @@ -0,0 +1,2 @@ +bugfixes: + - "github_deploy_key - fix bug during error handling if no body was present in the result (https://github.com/ansible-collections/community.general/issues/10853, https://github.com/ansible-collections/community.general/pull/10857)." 
diff --git a/changelogs/fragments/10873-six.yml b/changelogs/fragments/10873-six.yml new file mode 100644 index 0000000000..d9ea201520 --- /dev/null +++ b/changelogs/fragments/10873-six.yml @@ -0,0 +1,2 @@ +bugfixes: + - "Avoid usage of deprecated ``ansible.module_utils.six`` in all code that does not have to support Python 2 (https://github.com/ansible-collections/community.general/pull/10873)." diff --git a/changelogs/fragments/10874-pipx-180.yml b/changelogs/fragments/10874-pipx-180.yml new file mode 100644 index 0000000000..dd776827e8 --- /dev/null +++ b/changelogs/fragments/10874-pipx-180.yml @@ -0,0 +1,2 @@ +minor_changes: + - pipx module_utils - use ``PIPX_USE_EMOJI`` to disable emojis in the output of ``pipx`` 1.8.0 (https://github.com/ansible-collections/community.general/pull/10874). diff --git a/changelogs/fragments/10880-github_app_access_token-lookup.yml b/changelogs/fragments/10880-github_app_access_token-lookup.yml new file mode 100644 index 0000000000..b3c9503d59 --- /dev/null +++ b/changelogs/fragments/10880-github_app_access_token-lookup.yml @@ -0,0 +1,2 @@ +minor_changes: + - "github_app_access_token lookup plugin - add support for GitHub Enterprise Server (https://github.com/ansible-collections/community.general/issues/10879, https://github.com/ansible-collections/community.general/pull/10880)." diff --git a/changelogs/fragments/10888-six.yml b/changelogs/fragments/10888-six.yml new file mode 100644 index 0000000000..b1f09accb3 --- /dev/null +++ b/changelogs/fragments/10888-six.yml @@ -0,0 +1,2 @@ +bugfixes: + - "Remove all usage of ``ansible.module_utils.six`` (https://github.com/ansible-collections/community.general/pull/10888)." 
diff --git a/changelogs/fragments/10891-dict-refactor.yml b/changelogs/fragments/10891-dict-refactor.yml new file mode 100644 index 0000000000..63d5e585ff --- /dev/null +++ b/changelogs/fragments/10891-dict-refactor.yml @@ -0,0 +1,6 @@ +minor_changes: + - dependent lookup plugin - refactor dict initialization, no impact to users (https://github.com/ansible-collections/community.general/pull/10891). + - scaleway module_utils - improve code readability, no impact to users (https://github.com/ansible-collections/community.general/pull/10891). + - pacemaker_cluster.py - refactor dict initialization, no impact to users (https://github.com/ansible-collections/community.general/pull/10891). + - pacemaker_resource.py - refactor dict initialization, no impact to users (https://github.com/ansible-collections/community.general/pull/10891). + - pacemaker_stonith.py - refactor dict initialization, no impact to users (https://github.com/ansible-collections/community.general/pull/10891). diff --git a/changelogs/fragments/10892-remove-py2.yml b/changelogs/fragments/10892-remove-py2.yml new file mode 100644 index 0000000000..69904d4777 --- /dev/null +++ b/changelogs/fragments/10892-remove-py2.yml @@ -0,0 +1,7 @@ +minor_changes: + - known_hosts module_utils - drop Python 2 support when parsing output of ``urlparse`` (https://github.com/ansible-collections/community.general/pull/10892). + - aix_inittab - drop Python 2 support for function ``zip`` (https://github.com/ansible-collections/community.general/pull/10892). + - copr - drop support for Python 2 interpreter (https://github.com/ansible-collections/community.general/pull/10892). + - dconf - drop support for Python 2 interpreter (https://github.com/ansible-collections/community.general/pull/10892). + - irc - drop Python 2 support for SSL context creation (https://github.com/ansible-collections/community.general/pull/10892). 
+ - mail - drop Python 2 support for Message-ID domain setting (https://github.com/ansible-collections/community.general/pull/10892). diff --git a/changelogs/fragments/10899-use-f-strings.yml b/changelogs/fragments/10899-use-f-strings.yml new file mode 100644 index 0000000000..9752e5ebf2 --- /dev/null +++ b/changelogs/fragments/10899-use-f-strings.yml @@ -0,0 +1,14 @@ +minor_changes: + - wsl connection plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - accumulate filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - counter filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - crc32 filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - groupby_as_dict filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - hashids filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - json_query filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - lists filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - random_mac filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - time filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - unicode_normalize filter plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). 
+ - passwordstore lookup plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). + - ansible_type plugin_utils plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10899). diff --git a/changelogs/fragments/10903-2to3.yml b/changelogs/fragments/10903-2to3.yml new file mode 100644 index 0000000000..af0b744456 --- /dev/null +++ b/changelogs/fragments/10903-2to3.yml @@ -0,0 +1,8 @@ +minor_changes: + - pickle cache plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903). + - counter_enabled callback plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903). + - wsl connection plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903). + - cobbler inventory plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903). + - linode inventory plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903). + - utm_utils module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903). + - vexata module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10903). diff --git a/changelogs/fragments/10904-2to3-mods.yml b/changelogs/fragments/10904-2to3-mods.yml new file mode 100644 index 0000000000..12ca58b250 --- /dev/null +++ b/changelogs/fragments/10904-2to3-mods.yml @@ -0,0 +1,30 @@ +minor_changes: + - bitbucket_access_key - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - bitbucket_pipeline_known_host - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - bitbucket_pipeline_variable - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). 
+ - bzr - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - capabilities - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - gitlab_milestone - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - haproxy - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - homebrew - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - homebrew_cask - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - hwc_network_vpc - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - hwc_smn_topic - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - idrac_redfish_config - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - idrac_redfish_info - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - influxdb_retention_policy - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - ini_file - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - interfaces_file - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - launchd - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - logentries - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - packet_sshkey - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - pamd - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - taiga_issue - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). 
+ - vdo - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - vertica_role - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - vertica_schema - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - vertica_user - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - vexata_eg - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - vexata_volume - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - xcc_redfish_command - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). + - zypper - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10904). diff --git a/changelogs/fragments/10905-java-keystore-simplify.yml b/changelogs/fragments/10905-java-keystore-simplify.yml new file mode 100644 index 0000000000..7b2a0de53a --- /dev/null +++ b/changelogs/fragments/10905-java-keystore-simplify.yml @@ -0,0 +1,2 @@ +minor_changes: + - java_keystore - remove redundant function (https://github.com/ansible-collections/community.general/pull/10905). diff --git a/changelogs/fragments/10906-linode-modutils.yml b/changelogs/fragments/10906-linode-modutils.yml new file mode 100644 index 0000000000..ced88a7474 --- /dev/null +++ b/changelogs/fragments/10906-linode-modutils.yml @@ -0,0 +1,2 @@ +minor_changes: + - linode module utils - remove redundant code for ancient versions of Ansible (https://github.com/ansible-collections/community.general/pull/10906). 
diff --git a/changelogs/fragments/10907-2to3-mu.yml b/changelogs/fragments/10907-2to3-mu.yml new file mode 100644 index 0000000000..af19593cf0 --- /dev/null +++ b/changelogs/fragments/10907-2to3-mu.yml @@ -0,0 +1,9 @@ +minor_changes: + - csv module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907). + - gitlab module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907). + - homebrew module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907). + - ilo_redfish_utils module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907). + - redfish_utils module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907). + - saslprep module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907). + - utm_utils module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907). + - vexata module_utils plugin - use Python 3 idioms (https://github.com/ansible-collections/community.general/pull/10907). diff --git a/changelogs/fragments/10908-archive-lzma.yml b/changelogs/fragments/10908-archive-lzma.yml new file mode 100644 index 0000000000..bcce681bed --- /dev/null +++ b/changelogs/fragments/10908-archive-lzma.yml @@ -0,0 +1,2 @@ +minor_changes: + - archive - remove conditional code for older Python versions (https://github.com/ansible-collections/community.general/pull/10908). 
diff --git a/changelogs/fragments/10909-launchd-plistlib.yml b/changelogs/fragments/10909-launchd-plistlib.yml new file mode 100644 index 0000000000..fc798c9ddb --- /dev/null +++ b/changelogs/fragments/10909-launchd-plistlib.yml @@ -0,0 +1,2 @@ +minor_changes: + - launchd - remove conditional code supporting Python versions prior to 3.4 (https://github.com/ansible-collections/community.general/pull/10909). diff --git a/changelogs/fragments/10918-gitlab-runner-fix-check-mode.yml b/changelogs/fragments/10918-gitlab-runner-fix-check-mode.yml new file mode 100644 index 0000000000..214487938b --- /dev/null +++ b/changelogs/fragments/10918-gitlab-runner-fix-check-mode.yml @@ -0,0 +1,2 @@ +bugfixes: + - gitlab_runner - fix exception in check mode when a new runner is created (https://github.com/ansible-collections/community.general/issues/8854). diff --git a/changelogs/fragments/10933-keycloak-add-client-auth-for-clientsecret-modules.yml b/changelogs/fragments/10933-keycloak-add-client-auth-for-clientsecret-modules.yml new file mode 100644 index 0000000000..df70186ff5 --- /dev/null +++ b/changelogs/fragments/10933-keycloak-add-client-auth-for-clientsecret-modules.yml @@ -0,0 +1,2 @@ +bugfixes: + - keycloak_clientsecret, keycloak_clientsecret_info - make ``client_auth`` work (https://github.com/ansible-collections/community.general/issues/10932, https://github.com/ansible-collections/community.general/pull/10933). \ No newline at end of file diff --git a/changelogs/fragments/10934-cloudflare-dns-srv-bug.yml b/changelogs/fragments/10934-cloudflare-dns-srv-bug.yml new file mode 100644 index 0000000000..eb2b06d2f1 --- /dev/null +++ b/changelogs/fragments/10934-cloudflare-dns-srv-bug.yml @@ -0,0 +1,2 @@ +bugfixes: + - cloudflare_dns - roll back changes to SRV record validation (https://github.com/ansible-collections/community.general/issues/10934, https://github.com/ansible-collections/community.general/pull/10937). 
diff --git a/changelogs/fragments/10940-use-f-strings-xenserver.yml b/changelogs/fragments/10940-use-f-strings-xenserver.yml new file mode 100644 index 0000000000..114ac46486 --- /dev/null +++ b/changelogs/fragments/10940-use-f-strings-xenserver.yml @@ -0,0 +1,2 @@ +minor_changes: + - xenserver module utils plugin - use f-strings for string templating (https://github.com/ansible-collections/community.general/pull/10940). diff --git a/changelogs/fragments/9499-typetalk-deprecation.yml b/changelogs/fragments/9499-typetalk-deprecation.yml new file mode 100644 index 0000000000..8323bbe959 --- /dev/null +++ b/changelogs/fragments/9499-typetalk-deprecation.yml @@ -0,0 +1,2 @@ +deprecated_features: + - typetalk - module is deprecated and will be removed in community.general 13.0.0 (https://github.com/ansible-collections/community.general/pull/9499). diff --git a/changelogs/fragments/ansible-core-2.16.yml b/changelogs/fragments/ansible-core-2.16.yml new file mode 100644 index 0000000000..1132d20e3e --- /dev/null +++ b/changelogs/fragments/ansible-core-2.16.yml @@ -0,0 +1,2 @@ +removed_features: + - "Ansible-core 2.16 is no longer supported. This also means that the collection now requires Python 3.7+ (https://github.com/ansible-collections/community.general/pull/10884)." diff --git a/changelogs/fragments/become-pipelining.yml b/changelogs/fragments/become-pipelining.yml new file mode 100644 index 0000000000..201d85f71c --- /dev/null +++ b/changelogs/fragments/become-pipelining.yml @@ -0,0 +1,3 @@ +bugfixes: + - "doas become plugin - disable pipelining on ansible-core 2.19+. The plugin does not work with pipelining, and since ansible-core 2.19 become plugins can indicate that they do not work with pipelining (https://github.com/ansible-collections/community.general/issues/9977, https://github.com/ansible-collections/community.general/pull/10537)." + - "machinectl become plugin - disable pipelining on ansible-core 2.19+. 
The plugin does not work with pipelining, and since ansible-core 2.19 become plugins can indicate that they do not work with pipelining (https://github.com/ansible-collections/community.general/pull/10537)." diff --git a/changelogs/fragments/deprecations.yml b/changelogs/fragments/deprecations.yml new file mode 100644 index 0000000000..424b2d439b --- /dev/null +++ b/changelogs/fragments/deprecations.yml @@ -0,0 +1,16 @@ +removed_features: + - "yaml callback plugin - the deprecated plugin has been removed. Use the default callback with ``result_format=yaml`` instead (https://github.com/ansible-collections/community.general/pull/10883)." + - "purestorage doc fragment - the modules using this doc fragment have been removed from community.general 3.0.0 (https://github.com/ansible-collections/community.general/pull/10883)." + - "pure module utils - the modules using this module utils have been removed from community.general 3.0.0 (https://github.com/ansible-collections/community.general/pull/10883)." + - "bearychat - the module has been removed as the chat service is no longer available (https://github.com/ansible-collections/community.general/pull/10883)." + - "facter - the module has been replaced by ``community.general.facter_facts`` (https://github.com/ansible-collections/community.general/pull/10883)." + - "pacemaker_cluster - the option ``state`` is now required (https://github.com/ansible-collections/community.general/pull/10883)." + - >- + opkg - the value ``""`` for the option ``force`` is no longer allowed. Omit ``force`` instead (https://github.com/ansible-collections/community.general/pull/10883). + - "cmd_runner_fmt module utils - the parameter ``ctx_ignore_none`` to argument formatters has been removed (https://github.com/ansible-collections/community.general/pull/10883)." + - "cmd_runner module utils - the parameter ``ignore_value_none`` to ``CmdRunner.__call__()`` has been removed (https://github.com/ansible-collections/community.general/pull/10883)." 
+ - >- + mh.deco module utils - the parameters ``on_success`` and ``on_failure`` of ``cause()`` have been removed; use ``when="success"`` and ``when="failure"`` instead (https://github.com/ansible-collections/community.general/pull/10883). +breaking_changes: + - "slack - the default of ``prepend_hash`` changed from ``auto`` to ``never`` (https://github.com/ansible-collections/community.general/pull/10883)." + - "mh.base module utils - ``debug`` will now always be delegated to the underlying ``AnsibleModule`` object (https://github.com/ansible-collections/community.general/pull/10883)." diff --git a/changelogs/fragments/hiera.yml b/changelogs/fragments/hiera.yml new file mode 100644 index 0000000000..70c75f059e --- /dev/null +++ b/changelogs/fragments/hiera.yml @@ -0,0 +1,4 @@ +deprecated_features: + - "hiera lookup plugin - retrieving data with Hiera has been deprecated a long time ago; because of that this plugin will be removed from community.general 13.0.0. + If you disagree with this deprecation, please create an issue in the community.general repository + (https://github.com/ansible-collections/community.general/issues/4462, https://github.com/ansible-collections/community.general/pull/10779)." diff --git a/changelogs/fragments/keycloak-realm-webauthn-policies.yml b/changelogs/fragments/keycloak-realm-webauthn-policies.yml new file mode 100644 index 0000000000..91b1f67b3a --- /dev/null +++ b/changelogs/fragments/keycloak-realm-webauthn-policies.yml @@ -0,0 +1,2 @@ +minor_changes: + - keycloak_realm - add support for WebAuthn policy configuration options, including both regular and passwordless WebAuthn policies (https://github.com/ansible-collections/community.general/pull/10791). 
diff --git a/changelogs/fragments/logstash.yml b/changelogs/fragments/logstash.yml new file mode 100644 index 0000000000..1c7ec89b7d --- /dev/null +++ b/changelogs/fragments/logstash.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - logstash callback plugin - remove reference to Python 2 library (https://github.com/ansible-collections/community.general/pull/10345). diff --git a/changelogs/fragments/lvm_pv.yml b/changelogs/fragments/lvm_pv.yml new file mode 100644 index 0000000000..d0198d7ffb --- /dev/null +++ b/changelogs/fragments/lvm_pv.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - lvm_pv - properly detect SCSI or NVMe devices to rescan (https://github.com/ansible-collections/community.general/issues/10444, https://github.com/ansible-collections/community.general/pull/10596). diff --git a/changelogs/fragments/random_string_seed.yml b/changelogs/fragments/random_string_seed.yml new file mode 100644 index 0000000000..a90b7d93b5 --- /dev/null +++ b/changelogs/fragments/random_string_seed.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - random_string lookup plugin - allow to specify seed while generating random string (https://github.com/ansible-collections/community.general/issues/5362, https://github.com/ansible-collections/community.general/pull/10710). diff --git a/changelogs/fragments/replace-random-with-secrets.yml b/changelogs/fragments/replace-random-with-secrets.yml new file mode 100644 index 0000000000..b82e59e7e9 --- /dev/null +++ b/changelogs/fragments/replace-random-with-secrets.yml @@ -0,0 +1,4 @@ +bugfixes: + - random_string lookup plugin - replace ``random.SystemRandom()`` with ``secrets.SystemRandom()`` when + generating strings. This has no practical effect, as both are the same + (https://github.com/ansible-collections/community.general/pull/10893). 
diff --git a/tests/integration/targets/pids/files/obtainpid.sh b/docs/docsite/config.yml similarity index 80% rename from tests/integration/targets/pids/files/obtainpid.sh rename to docs/docsite/config.yml index 1090f87786..1d6cf8554a 100644 --- a/tests/integration/targets/pids/files/obtainpid.sh +++ b/docs/docsite/config.yml @@ -1,7 +1,7 @@ -#!/usr/bin/env bash +--- # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -"$1" 100 & -echo "$!" > "$2" +changelog: + write_changelog: true diff --git a/docs/docsite/extra-docs.yml b/docs/docsite/extra-docs.yml index 2171031ac1..4594ab4c2d 100644 --- a/docs/docsite/extra-docs.yml +++ b/docs/docsite/extra-docs.yml @@ -8,3 +8,17 @@ sections: toctree: - filter_guide - test_guide + - title: Technology Guides + toctree: + - guide_alicloud + - guide_iocage + - guide_online + - guide_packet + - guide_scaleway + - title: Developer Guides + toctree: + - guide_deps + - guide_vardict + - guide_cmdrunner + - guide_modulehelper + - guide_uthelper diff --git a/docs/docsite/helper/lists_mergeby/default-recursive-true.yml b/docs/docsite/helper/lists_mergeby/default-recursive-true.yml deleted file mode 100644 index 133c8f2aec..0000000000 --- a/docs/docsite/helper/lists_mergeby/default-recursive-true.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -list1: - - name: myname01 - param01: - x: default_value - y: default_value - list: - - default_value - - name: myname02 - param01: [1, 1, 2, 3] - -list2: - - name: myname01 - param01: - y: patch_value - z: patch_value - list: - - patch_value - - name: myname02 - param01: [3, 4, 4, {key: value}] diff --git a/docs/docsite/helper/lists_mergeby/example-001.yml 
b/docs/docsite/helper/lists_mergeby/example-001.yml deleted file mode 100644 index 0cf6a9b8a7..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-001.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -- name: 1. Merge two lists by common attribute 'name' - include_vars: - dir: example-001_vars -- debug: - var: list3 - when: debug|d(false)|bool -- template: - src: list3.out.j2 - dest: example-001.out diff --git a/docs/docsite/helper/lists_mergeby/example-001_vars/default-common.yml b/docs/docsite/helper/lists_mergeby/example-001_vars/default-common.yml deleted file mode 120000 index 7ea8984a8d..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-001_vars/default-common.yml +++ /dev/null @@ -1 +0,0 @@ -../default-common.yml \ No newline at end of file diff --git a/docs/docsite/helper/lists_mergeby/example-002.yml b/docs/docsite/helper/lists_mergeby/example-002.yml deleted file mode 100644 index 5e6e0315df..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-002.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -- name: 2. 
Merge two lists by common attribute 'name' - include_vars: - dir: example-002_vars -- debug: - var: list3 - when: debug|d(false)|bool -- template: - src: list3.out.j2 - dest: example-002.out diff --git a/docs/docsite/helper/lists_mergeby/example-002_vars/default-common.yml b/docs/docsite/helper/lists_mergeby/example-002_vars/default-common.yml deleted file mode 120000 index 7ea8984a8d..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-002_vars/default-common.yml +++ /dev/null @@ -1 +0,0 @@ -../default-common.yml \ No newline at end of file diff --git a/docs/docsite/helper/lists_mergeby/example-003.yml b/docs/docsite/helper/lists_mergeby/example-003.yml deleted file mode 100644 index 2f93ab8a27..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-003.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -- name: 3. 
Merge recursive by 'name', replace lists (default) - include_vars: - dir: example-003_vars -- debug: - var: list3 - when: debug|d(false)|bool -- template: - src: list3.out.j2 - dest: example-003.out diff --git a/docs/docsite/helper/lists_mergeby/example-003_vars/default-recursive-true.yml b/docs/docsite/helper/lists_mergeby/example-003_vars/default-recursive-true.yml deleted file mode 120000 index 299736f8ad..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-003_vars/default-recursive-true.yml +++ /dev/null @@ -1 +0,0 @@ -../default-recursive-true.yml \ No newline at end of file diff --git a/docs/docsite/helper/lists_mergeby/example-004.yml b/docs/docsite/helper/lists_mergeby/example-004.yml deleted file mode 100644 index 3ef067faf3..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-004.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -- name: 4. 
Merge recursive by 'name', keep lists - include_vars: - dir: example-004_vars -- debug: - var: list3 - when: debug|d(false)|bool -- template: - src: list3.out.j2 - dest: example-004.out diff --git a/docs/docsite/helper/lists_mergeby/example-004_vars/default-recursive-true.yml b/docs/docsite/helper/lists_mergeby/example-004_vars/default-recursive-true.yml deleted file mode 120000 index 299736f8ad..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-004_vars/default-recursive-true.yml +++ /dev/null @@ -1 +0,0 @@ -../default-recursive-true.yml \ No newline at end of file diff --git a/docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml deleted file mode 100644 index a054ea1e73..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-004_vars/list3.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -list3: "{{ [list1, list2]| - community.general.lists_mergeby('name', - recursive=true, - list_merge='keep') }}" diff --git a/docs/docsite/helper/lists_mergeby/example-005.yml b/docs/docsite/helper/lists_mergeby/example-005.yml deleted file mode 100644 index 57e7a779d9..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-005.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -- name: 5. 
Merge recursive by 'name', append lists - include_vars: - dir: example-005_vars -- debug: - var: list3 - when: debug|d(false)|bool -- template: - src: list3.out.j2 - dest: example-005.out diff --git a/docs/docsite/helper/lists_mergeby/example-005_vars/default-recursive-true.yml b/docs/docsite/helper/lists_mergeby/example-005_vars/default-recursive-true.yml deleted file mode 120000 index 299736f8ad..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-005_vars/default-recursive-true.yml +++ /dev/null @@ -1 +0,0 @@ -../default-recursive-true.yml \ No newline at end of file diff --git a/docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml deleted file mode 100644 index 3480bf6581..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-005_vars/list3.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -list3: "{{ [list1, list2]| - community.general.lists_mergeby('name', - recursive=true, - list_merge='append') }}" diff --git a/docs/docsite/helper/lists_mergeby/example-006.yml b/docs/docsite/helper/lists_mergeby/example-006.yml deleted file mode 100644 index 41fc88e496..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-006.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -- name: 6. 
Merge recursive by 'name', prepend lists - include_vars: - dir: example-006_vars -- debug: - var: list3 - when: debug|d(false)|bool -- template: - src: list3.out.j2 - dest: example-006.out diff --git a/docs/docsite/helper/lists_mergeby/example-006_vars/default-recursive-true.yml b/docs/docsite/helper/lists_mergeby/example-006_vars/default-recursive-true.yml deleted file mode 120000 index 299736f8ad..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-006_vars/default-recursive-true.yml +++ /dev/null @@ -1 +0,0 @@ -../default-recursive-true.yml \ No newline at end of file diff --git a/docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml deleted file mode 100644 index 97513b5593..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-006_vars/list3.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -list3: "{{ [list1, list2]| - community.general.lists_mergeby('name', - recursive=true, - list_merge='prepend') }}" diff --git a/docs/docsite/helper/lists_mergeby/example-007.yml b/docs/docsite/helper/lists_mergeby/example-007.yml deleted file mode 100644 index 3de7158447..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-007.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -- name: 7. 
Merge recursive by 'name', append lists 'remove present' - include_vars: - dir: example-007_vars -- debug: - var: list3 - when: debug|d(false)|bool -- template: - src: list3.out.j2 - dest: example-007.out diff --git a/docs/docsite/helper/lists_mergeby/example-007_vars/default-recursive-true.yml b/docs/docsite/helper/lists_mergeby/example-007_vars/default-recursive-true.yml deleted file mode 120000 index 299736f8ad..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-007_vars/default-recursive-true.yml +++ /dev/null @@ -1 +0,0 @@ -../default-recursive-true.yml \ No newline at end of file diff --git a/docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml deleted file mode 100644 index cb51653b49..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-007_vars/list3.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -list3: "{{ [list1, list2]| - community.general.lists_mergeby('name', - recursive=true, - list_merge='append_rp') }}" diff --git a/docs/docsite/helper/lists_mergeby/example-008.yml b/docs/docsite/helper/lists_mergeby/example-008.yml deleted file mode 100644 index e33828bf9a..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-008.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -- name: 8. 
Merge recursive by 'name', prepend lists 'remove present' - include_vars: - dir: example-008_vars -- debug: - var: list3 - when: debug|d(false)|bool -- template: - src: list3.out.j2 - dest: example-008.out diff --git a/docs/docsite/helper/lists_mergeby/example-008_vars/default-recursive-true.yml b/docs/docsite/helper/lists_mergeby/example-008_vars/default-recursive-true.yml deleted file mode 120000 index 299736f8ad..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-008_vars/default-recursive-true.yml +++ /dev/null @@ -1 +0,0 @@ -../default-recursive-true.yml \ No newline at end of file diff --git a/docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml b/docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml deleted file mode 100644 index af7001fc4a..0000000000 --- a/docs/docsite/helper/lists_mergeby/example-008_vars/list3.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -list3: "{{ [list1, list2]| - community.general.lists_mergeby('name', - recursive=true, - list_merge='prepend_rp') }}" diff --git a/docs/docsite/helper/lists_mergeby/examples.yml b/docs/docsite/helper/lists_mergeby/examples.yml deleted file mode 100644 index 83b985084e..0000000000 --- a/docs/docsite/helper/lists_mergeby/examples.yml +++ /dev/null @@ -1,54 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -examples: - - label: 'In the example below the lists are merged by the attribute ``name``:' - file: example-001_vars/list3.yml - lang: 'yaml+jinja' - - label: 'This produces:' - file: example-001.out - lang: 'yaml' - - label: 'It is possible to use a list of lists as an input of the filter:' - file: example-002_vars/list3.yml - lang: 
'yaml+jinja' - - label: 'This produces the same result as in the previous example:' - file: example-002.out - lang: 'yaml' - - label: 'Example ``list_merge=replace`` (default):' - file: example-003_vars/list3.yml - lang: 'yaml+jinja' - - label: 'This produces:' - file: example-003.out - lang: 'yaml' - - label: 'Example ``list_merge=keep``:' - file: example-004_vars/list3.yml - lang: 'yaml+jinja' - - label: 'This produces:' - file: example-004.out - lang: 'yaml' - - label: 'Example ``list_merge=append``:' - file: example-005_vars/list3.yml - lang: 'yaml+jinja' - - label: 'This produces:' - file: example-005.out - lang: 'yaml' - - label: 'Example ``list_merge=prepend``:' - file: example-006_vars/list3.yml - lang: 'yaml+jinja' - - label: 'This produces:' - file: example-006.out - lang: 'yaml' - - label: 'Example ``list_merge=append_rp``:' - file: example-007_vars/list3.yml - lang: 'yaml+jinja' - - label: 'This produces:' - file: example-007.out - lang: 'yaml' - - label: 'Example ``list_merge=prepend_rp``:' - file: example-008_vars/list3.yml - lang: 'yaml+jinja' - - label: 'This produces:' - file: example-008.out - lang: 'yaml' diff --git a/docs/docsite/helper/lists_mergeby/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2 b/docs/docsite/helper/lists_mergeby/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2 deleted file mode 100644 index 71d0d5da6c..0000000000 --- a/docs/docsite/helper/lists_mergeby/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2 +++ /dev/null @@ -1,62 +0,0 @@ -.. 
- Copyright (c) Ansible Project - GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) - SPDX-License-Identifier: GPL-3.0-or-later - -Merging lists of dictionaries -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If you have two or more lists of dictionaries and want to combine them into a list of merged dictionaries, where the dictionaries are merged by an attribute, you can use the ``lists_mergeby`` filter. - -.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ref:`the documentation for the community.general.yaml callback plugin `. - -Let us use the lists below in the following examples: - -.. code-block:: yaml - - {{ lookup('file', 'default-common.yml')|indent(2) }} - -{% for i in examples[0:2] %} -{{ i.label }} - -.. code-block:: {{ i.lang }} - - {{ lookup('file', i.file)|indent(2) }} - -{% endfor %} - -.. versionadded:: 2.0.0 - -{% for i in examples[2:4] %} -{{ i.label }} - -.. code-block:: {{ i.lang }} - - {{ lookup('file', i.file)|indent(2) }} - -{% endfor %} - -The filter also accepts two optional parameters: ``recursive`` and ``list_merge``. These parameters are only supported when used with ansible-base 2.10 or ansible-core, but not with Ansible 2.9. This is available since community.general 4.4.0. - -**recursive** - Is a boolean, default to ``False``. Should the ``community.general.lists_mergeby`` recursively merge nested hashes. Note: It does not depend on the value of the ``hash_behaviour`` setting in ``ansible.cfg``. - -**list_merge** - Is a string, its possible values are ``replace`` (default), ``keep``, ``append``, ``prepend``, ``append_rp`` or ``prepend_rp``. It modifies the behaviour of ``community.general.lists_mergeby`` when the hashes to merge contain arrays/lists. 
- -The examples below set ``recursive=true`` and display the differences among all six options of ``list_merge``. Functionality of the parameters is exactly the same as in the filter ``combine``. See :ref:`Combining hashes/dictionaries ` to learn details about these options. - -Let us use the lists below in the following examples - -.. code-block:: yaml - - {{ lookup('file', 'default-recursive-true.yml')|indent(2) }} - -{% for i in examples[4:16] %} -{{ i.label }} - -.. code-block:: {{ i.lang }} - - {{ lookup('file', i.file)|indent(2) }} - -{% endfor %} diff --git a/docs/docsite/helper/lists_mergeby/playbook.yml b/docs/docsite/helper/lists_mergeby/playbook.yml deleted file mode 100644 index 793d233485..0000000000 --- a/docs/docsite/helper/lists_mergeby/playbook.yml +++ /dev/null @@ -1,62 +0,0 @@ ---- -# Copyright (c) Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# 1) Run all examples and create example-XXX.out -# shell> ansible-playbook playbook.yml -e examples=true -# -# 2) Optionally, for testing, create examples_all.rst -# shell> ansible-playbook playbook.yml -e examples_all=true -# -# 3) Create docs REST files -# shell> ansible-playbook playbook.yml -e merging_lists_of_dictionaries=true -# -# Notes: -# * Use YAML callback, e.g. set ANSIBLE_STDOUT_CALLBACK=community.general.yaml -# * Use sphinx-view to render and review the REST files -# shell> sphinx-view /examples_all.rst -# * Proofread and copy completed docs *.rst files into the directory rst. -# * Then delete the *.rst and *.out files from this directory. Do not -# add *.rst and *.out in this directory to the version control. 
-# -# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -# community.general/docs/docsite/helper/lists_mergeby/playbook.yml - -- hosts: localhost - gather_facts: false - tasks: - - - block: - - import_tasks: example-001.yml - tags: t001 - - import_tasks: example-002.yml - tags: t002 - - import_tasks: example-003.yml - tags: t003 - - import_tasks: example-004.yml - tags: t004 - - import_tasks: example-005.yml - tags: t005 - - import_tasks: example-006.yml - tags: t006 - - import_tasks: example-007.yml - tags: t007 - - import_tasks: example-008.yml - tags: t008 - when: examples|d(false)|bool - - - block: - - include_vars: examples.yml - - template: - src: examples_all.rst.j2 - dest: examples_all.rst - when: examples_all|d(false)|bool - - - block: - - include_vars: examples.yml - - template: - src: filter_guide_abstract_informations_merging_lists_of_dictionaries.rst.j2 - dest: filter_guide_abstract_informations_merging_lists_of_dictionaries.rst - when: merging_lists_of_dictionaries|d(false)|bool diff --git a/docs/docsite/links.yml b/docs/docsite/links.yml index bd954c4096..fe41d1d2fd 100644 --- a/docs/docsite/links.yml +++ b/docs/docsite/links.yml @@ -9,6 +9,8 @@ edit_on_github: path_prefix: '' extra_links: + - description: Ask for help + url: https://forum.ansible.com/c/help/6/none - description: Submit a bug report url: https://github.com/ansible-collections/community.general/issues/new?assignees=&labels=&template=bug_report.yml - description: Request a feature @@ -22,6 +24,10 @@ communication: - topic: General usage and support questions network: Libera channel: '#ansible' - mailing_lists: - - topic: Ansible Project List - url: https://groups.google.com/g/ansible-project + forums: + - topic: "Ansible Forum: General usage and support questions" + # The following URL directly points to the "Get Help" section + url: https://forum.ansible.com/c/help/6/none + - topic: "Ansible Forum: Discussions about the collection itself, not for specific 
modules or plugins" + # The following URL directly points to the "community-general" tag + url: https://forum.ansible.com/tag/community-general diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst new file mode 100644 index 0000000000..3549d29ba7 --- /dev/null +++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-keep_keys.rst @@ -0,0 +1,151 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +keep_keys +""""""""" + +Use the filter :ansplugin:`community.general.keep_keys#filter` if you have a list of dictionaries and want to keep certain keys only. + +.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ansplugin:`the documentation for the community.general.yaml callback plugin `. + + +Let us use the below list in the following examples: + +.. code-block:: yaml + + input: + - k0_x0: A0 + k1_x1: B0 + k2_x2: [C0] + k3_x3: foo + - k0_x0: A1 + k1_x1: B1 + k2_x2: [C1] + k3_x3: bar + + +* By default, match keys that equal any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1 + + target: ['k0_x0', 'k1_x1'] + result: "{{ input | community.general.keep_keys(target=target) }}" + + +gives + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - {k0_x0: A0, k1_x1: B0} + - {k0_x0: A1, k1_x1: B1} + + +.. versionadded:: 9.1.0 + +* The results of the below examples 1-5 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - {k0_x0: A0, k1_x1: B0} + - {k0_x0: A1, k1_x1: B1} + + +1. Match keys that equal any of the items in the target. + +.. 
code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: equal + target: ['k0_x0', 'k1_x1'] + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +2. Match keys that start with any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: starts_with + target: ['k0', 'k1'] + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +3. Match keys that end with any of the items in target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: ends_with + target: ['x0', 'x1'] + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +4. Match keys by the regex. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: regex + target: ['^.*[01]_x.*$'] + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +5. Match keys by the regex. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: regex + target: ^.*[01]_x.*$ + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + + +* The results of the below examples 6-9 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - {k0_x0: A0} + - {k0_x0: A1} + + +6. Match keys that equal the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: equal + target: k0_x0 + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +7. Match keys that start with the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: starts_with + target: k0 + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +8. Match keys that end with the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: ends_with + target: x0 + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + +9. Match keys by the regex. + +.. 
code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: regex + target: ^.*0_x.*$ + result: "{{ input | community.general.keep_keys(target=target, matching_parameter=mp) }}" + diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst new file mode 100644 index 0000000000..4ac87ab79c --- /dev/null +++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-remove_keys.rst @@ -0,0 +1,159 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +remove_keys +""""""""""" + +Use the filter :ansplugin:`community.general.remove_keys#filter` if you have a list of dictionaries and want to remove certain keys. + +.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See See :ansplugin:`the documentation for the community.general.yaml callback plugin `. + + +Let us use the below list in the following examples: + +.. code-block:: yaml + + input: + - k0_x0: A0 + k1_x1: B0 + k2_x2: [C0] + k3_x3: foo + - k0_x0: A1 + k1_x1: B1 + k2_x2: [C1] + k3_x3: bar + + +* By default, match keys that equal any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1 + + target: ['k0_x0', 'k1_x1'] + result: "{{ input | community.general.remove_keys(target=target) }}" + + +gives + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - k2_x2: [C0] + k3_x3: foo + - k2_x2: [C1] + k3_x3: bar + + +.. versionadded:: 9.1.0 + +* The results of the below examples 1-5 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - k2_x2: [C0] + k3_x3: foo + - k2_x2: [C1] + k3_x3: bar + + +1. 
Match keys that equal any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: equal + target: ['k0_x0', 'k1_x1'] + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +2. Match keys that start with any of the items in the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: starts_with + target: ['k0', 'k1'] + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +3. Match keys that end with any of the items in target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: ends_with + target: ['x0', 'x1'] + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +4. Match keys by the regex. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: regex + target: ['^.*[01]_x.*$'] + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +5. Match keys by the regex. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: regex + target: ^.*[01]_x.*$ + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + + +* The results of the below examples 6-9 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - k1_x1: B0 + k2_x2: [C0] + k3_x3: foo + - k1_x1: B1 + k2_x2: [C1] + k3_x3: bar + + +6. Match keys that equal the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: equal + target: k0_x0 + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +7. Match keys that start with the target. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: starts_with + target: k0 + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +8. Match keys that end with the target. + +.. 
code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: ends_with + target: x0 + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + +9. Match keys by the regex. + +.. code-block:: yaml+jinja + :emphasize-lines: 1,2 + + mp: regex + target: ^.*0_x.*$ + result: "{{ input | community.general.remove_keys(target=target, matching_parameter=mp) }}" + diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst new file mode 100644 index 0000000000..d0eb202bfe --- /dev/null +++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries-replace_keys.rst @@ -0,0 +1,175 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +replace_keys +"""""""""""" + +Use the filter :ansplugin:`community.general.replace_keys#filter` if you have a list of dictionaries and want to replace certain keys. + +.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ansplugin:`the documentation for the community.general.yaml callback plugin `. + + +Let us use the below list in the following examples: + +.. code-block:: yaml + + input: + - k0_x0: A0 + k1_x1: B0 + k2_x2: [C0] + k3_x3: foo + - k0_x0: A1 + k1_x1: B1 + k2_x2: [C1] + k3_x3: bar + + +* By default, match keys that equal any of the attributes before. + +.. code-block:: yaml+jinja + :emphasize-lines: 1-3 + + target: + - {after: a0, before: k0_x0} + - {after: a1, before: k1_x1} + + result: "{{ input | community.general.replace_keys(target=target) }}" + + +gives + +.. 
code-block:: yaml + :emphasize-lines: 1- + + result: + - a0: A0 + a1: B0 + k2_x2: [C0] + k3_x3: foo + - a0: A1 + a1: B1 + k2_x2: [C1] + k3_x3: bar + + +.. versionadded:: 9.1.0 + +* The results of the below examples 1-3 are all the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - a0: A0 + a1: B0 + k2_x2: [C0] + k3_x3: foo + - a0: A1 + a1: B1 + k2_x2: [C1] + k3_x3: bar + + +1. Replace keys that starts with any of the attributes before. + +.. code-block:: yaml+jinja + :emphasize-lines: 1-4 + + mp: starts_with + target: + - {after: a0, before: k0} + - {after: a1, before: k1} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + +2. Replace keys that ends with any of the attributes before. + +.. code-block:: yaml+jinja + :emphasize-lines: 1-4 + + mp: ends_with + target: + - {after: a0, before: x0} + - {after: a1, before: x1} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + +3. Replace keys that match any regex of the attributes before. + +.. code-block:: yaml+jinja + :emphasize-lines: 1-4 + + mp: regex + target: + - {after: a0, before: ^.*0_x.*$} + - {after: a1, before: ^.*1_x.*$} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + + +* The results of the below examples 4-5 are the same: + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - {X: foo} + - {X: bar} + + +4. If more keys match the same attribute before the last one will be used. + +.. code-block:: yaml+jinja + :emphasize-lines: 1-3 + + mp: regex + target: + - {after: X, before: ^.*_x.*$} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + +5. If there are items with equal attribute before the first one will be used. + +.. 
code-block:: yaml+jinja + :emphasize-lines: 1-3 + + mp: regex + target: + - {after: X, before: ^.*_x.*$} + - {after: Y, before: ^.*_x.*$} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + + +6. If there are more matches for a key the first one will be used. + +.. code-block:: yaml + :emphasize-lines: 1- + + input: + - {aaa1: A, bbb1: B, ccc1: C} + - {aaa2: D, bbb2: E, ccc2: F} + + +.. code-block:: yaml+jinja + :emphasize-lines: 1-4 + + mp: starts_with + target: + - {after: X, before: a} + - {after: Y, before: aa} + + result: "{{ input | community.general.replace_keys(target=target, matching_parameter=mp) }}" + +gives + +.. code-block:: yaml + :emphasize-lines: 1- + + result: + - {X: A, bbb1: B, ccc1: C} + - {X: D, bbb2: E, ccc2: F} + + diff --git a/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst new file mode 100644 index 0000000000..64a82536d8 --- /dev/null +++ b/docs/docsite/rst/filter_guide-abstract_informations-lists_of_dictionaries.rst @@ -0,0 +1,18 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.filter_guide.filter_guide_abstract_informations.lists_of_dicts: + +Lists of dictionaries +^^^^^^^^^^^^^^^^^^^^^ + +Filters to manage keys in a list of dictionaries: + +.. 
toctree:: + :maxdepth: 1 + + filter_guide-abstract_informations-lists_of_dictionaries-keep_keys + filter_guide-abstract_informations-lists_of_dictionaries-remove_keys + filter_guide-abstract_informations-lists_of_dictionaries-replace_keys diff --git a/docs/docsite/rst/filter_guide.rst b/docs/docsite/rst/filter_guide.rst index 1c6468ddec..da8a90af3c 100644 --- a/docs/docsite/rst/filter_guide.rst +++ b/docs/docsite/rst/filter_guide.rst @@ -8,7 +8,7 @@ community.general Filter Guide ============================== -The :ref:`community.general collection ` offers several useful filter plugins. +The :anscollection:`community.general collection ` offers several useful filter plugins. .. toctree:: :maxdepth: 2 diff --git a/docs/docsite/rst/filter_guide_abstract_informations.rst b/docs/docsite/rst/filter_guide_abstract_informations.rst index 8f997f1637..818c09f02c 100644 --- a/docs/docsite/rst/filter_guide_abstract_informations.rst +++ b/docs/docsite/rst/filter_guide_abstract_informations.rst @@ -11,5 +11,7 @@ Abstract transformations filter_guide_abstract_informations_dictionaries filter_guide_abstract_informations_grouping + filter_guide-abstract_informations-lists_of_dictionaries filter_guide_abstract_informations_merging_lists_of_dictionaries + filter_guide_abstract_informations_lists_helper filter_guide_abstract_informations_counting_elements_in_sequence diff --git a/docs/docsite/rst/filter_guide_abstract_informations_counting_elements_in_sequence.rst b/docs/docsite/rst/filter_guide_abstract_informations_counting_elements_in_sequence.rst index dcadd5a793..98e8eb1c4d 100644 --- a/docs/docsite/rst/filter_guide_abstract_informations_counting_elements_in_sequence.rst +++ b/docs/docsite/rst/filter_guide_abstract_informations_counting_elements_in_sequence.rst @@ -6,7 +6,7 @@ Counting elements in a sequence ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The ``community.general.counter`` filter plugin allows you to count (hashable) elements in a sequence. 
Elements are returned as dictionary keys and their counts are stored as dictionary values. +The :ansplugin:`community.general.counter filter plugin ` allows you to count (hashable) elements in a sequence. Elements are returned as dictionary keys and their counts are stored as dictionary values. .. code-block:: yaml+jinja diff --git a/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst b/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst index 840bd1542c..e5b5bb7e36 100644 --- a/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst +++ b/docs/docsite/rst/filter_guide_abstract_informations_dictionaries.rst @@ -6,7 +6,7 @@ Dictionaries ^^^^^^^^^^^^ -You can use the ``dict_kv`` filter to create a single-entry dictionary with ``value | community.general.dict_kv(key)``: +You can use the :ansplugin:`community.general.dict_kv filter ` to create a single-entry dictionary with ``value | community.general.dict_kv(key)``: .. code-block:: yaml+jinja @@ -26,8 +26,8 @@ You can use the ``dict_kv`` filter to create a single-entry dictionary with ``va type: host database: all myservers: - - server1 - - server2 + - server1 + - server2 This produces: @@ -58,7 +58,7 @@ This produces: .. versionadded:: 2.0.0 -If you need to convert a list of key-value pairs to a dictionary, you can use the ``dict`` function. Unfortunately, this function cannot be used with ``map``. For this, the ``community.general.dict`` filter can be used: +If you need to convert a list of key-value pairs to a dictionary, you can use the ``dict`` function. Unfortunately, this function cannot be used with ``map``. For this, the :ansplugin:`community.general.dict filter ` can be used: .. 
code-block:: yaml+jinja diff --git a/docs/docsite/rst/filter_guide_abstract_informations_grouping.rst b/docs/docsite/rst/filter_guide_abstract_informations_grouping.rst index 2cea7f9bab..cb15989659 100644 --- a/docs/docsite/rst/filter_guide_abstract_informations_grouping.rst +++ b/docs/docsite/rst/filter_guide_abstract_informations_grouping.rst @@ -6,7 +6,7 @@ Grouping ^^^^^^^^ -If you have a list of dictionaries, the Jinja2 ``groupby`` filter allows to group the list by an attribute. This results in a list of ``(grouper, list)`` namedtuples, where ``list`` contains all dictionaries where the selected attribute equals ``grouper``. If you know that for every ``grouper``, there will be a most one entry in that list, you can use the ``community.general.groupby_as_dict`` filter to convert the original list into a dictionary which maps ``grouper`` to the corresponding dictionary. +If you have a list of dictionaries, the Jinja2 ``groupby`` filter allows to group the list by an attribute. This results in a list of ``(grouper, list)`` namedtuples, where ``list`` contains all dictionaries where the selected attribute equals ``grouper``. If you know that for every ``grouper``, there will be a most one entry in that list, you can use the :ansplugin:`community.general.groupby_as_dict filter ` to convert the original list into a dictionary which maps ``grouper`` to the corresponding dictionary. One example is ``ansible_facts.mounts``, which is a list of dictionaries where each has one ``device`` element to indicate the device which is mounted. 
Therefore, ``ansible_facts.mounts | community.general.groupby_as_dict('device')`` is a dictionary mapping a device to the mount information: diff --git a/docs/docsite/rst/filter_guide_abstract_informations_lists_helper.rst b/docs/docsite/rst/filter_guide_abstract_informations_lists_helper.rst new file mode 100644 index 0000000000..505320c79c --- /dev/null +++ b/docs/docsite/rst/filter_guide_abstract_informations_lists_helper.rst @@ -0,0 +1,81 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +Union, intersection and difference of lists +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Starting with Ansible Core 2.16, the builtin filters :ansplugin:`ansible.builtin.union#filter`, :ansplugin:`ansible.builtin.intersect#filter`, :ansplugin:`ansible.builtin.difference#filter` and :ansplugin:`ansible.builtin.symmetric_difference#filter` began to behave differently and no longer preserve the item order. Items in the resulting lists are returned in arbitrary order and the order can vary between subsequent runs. + +The Ansible community.general collection provides the following additional list filters: + +- :ansplugin:`community.general.lists_union#filter` +- :ansplugin:`community.general.lists_intersect#filter` +- :ansplugin:`community.general.lists_difference#filter` +- :ansplugin:`community.general.lists_symmetric_difference#filter` + +These filters preserve the item order, eliminate duplicates and are an extended version of the builtin ones, because they can operate on more than two lists. + +.. note:: Stick to the builtin filters when item order is not important or when you do not need the n-ary operating mode. The builtin filters are faster, because they rely mostly on sets as their underlying data structure. + +Let us use the lists below in the following examples: + +.. 
code-block:: yaml + + A: [9, 5, 7, 1, 9, 4, 10, 5, 9, 7] + B: [4, 1, 2, 8, 3, 1, 7] + C: [10, 2, 1, 9, 1] + +The union of ``A`` and ``B`` can be written as: + +.. code-block:: yaml+jinja + + result: "{{ A | community.general.lists_union(B) }}" + +This statement produces: + +.. code-block:: yaml + + result: [9, 5, 7, 1, 4, 10, 2, 8, 3] + +If you want to calculate the intersection of ``A``, ``B`` and ``C``, you can use the following statement: + +.. code-block:: yaml+jinja + + result: "{{ A | community.general.lists_intersect(B, C) }}" + +Alternatively, you can use a list of lists as an input of the filter + +.. code-block:: yaml+jinja + + result: "{{ [A, B] | community.general.lists_intersect(C) }}" + +or + +.. code-block:: yaml+jinja + + result: "{{ [A, B, C] | community.general.lists_intersect(flatten=true) }}" + +All three statements are equivalent and give: + +.. code-block:: yaml + + result: [1] + +.. note:: Be aware that in most cases, filter calls without any argument require ``flatten=true``, otherwise the input is returned as the result. The reason for this is that the input is considered as a variable argument and is wrapped by an additional outer list. ``flatten=true`` ensures that this list is removed before the input is processed by the filter logic. + +The filters :ansplugin:`community.general.lists_difference#filter` or :ansplugin:`community.general.lists_symmetric_difference#filter` can be used in the same way as the filters in the examples above. They calculate the difference or the symmetric difference between two or more lists and preserve the item order. + +For example, the symmetric difference of ``A``, ``B`` and ``C`` may be written as: + +.. code-block:: yaml+jinja + + result: "{{ A | community.general.lists_symmetric_difference(B, C) }}" + +This gives: + +.. 
code-block:: yaml + + result: [5, 8, 3, 1] + diff --git a/docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst b/docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst index 9b56e98d7e..cafe04e5c4 100644 --- a/docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst +++ b/docs/docsite/rst/filter_guide_abstract_informations_merging_lists_of_dictionaries.rst @@ -6,33 +6,30 @@ Merging lists of dictionaries ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you have two or more lists of dictionaries and want to combine them into a list of merged dictionaries, where the dictionaries are merged by an attribute, you can use the ``lists_mergeby`` filter. +If you have two or more lists of dictionaries and want to combine them into a list of merged dictionaries, where the dictionaries are merged by an attribute, you can use the :ansplugin:`community.general.lists_mergeby ` filter. -.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See :ref:`the documentation for the community.general.yaml callback plugin `. +.. note:: The output of the examples in this section use the YAML callback plugin. Quoting: "Ansible output that can be quite a bit easier to read than the default JSON formatting." See the documentation for the :ansplugin:`community.general.yaml callback plugin `. Let us use the lists below in the following examples: .. code-block:: yaml list1: - - name: foo - extra: true - - name: bar - extra: false - - name: meh - extra: true + - {name: foo, extra: true} + - {name: bar, extra: false} + - {name: meh, extra: true} list2: - - name: foo - path: /foo - - name: baz - path: /baz + - {name: foo, path: /foo} + - {name: baz, path: /baz} +Two lists +""""""""" In the example below the lists are merged by the attribute ``name``: .. 
code-block:: yaml+jinja - list3: "{{ list1| + list3: "{{ list1 | community.general.lists_mergeby(list2, 'name') }}" This produces: @@ -40,24 +37,21 @@ This produces: .. code-block:: yaml list3: - - extra: false - name: bar - - name: baz - path: /baz - - extra: true - name: foo - path: /foo - - extra: true - name: meh + - {name: bar, extra: false} + - {name: baz, path: /baz} + - {name: foo, extra: true, path: /foo} + - {name: meh, extra: true} .. versionadded:: 2.0.0 +List of two lists +""""""""""""""""" It is possible to use a list of lists as an input of the filter: .. code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name') }}" This produces the same result as in the previous example: @@ -65,26 +59,40 @@ This produces the same result as in the previous example: .. code-block:: yaml list3: - - extra: false - name: bar - - name: baz - path: /baz - - extra: true - name: foo - path: /foo - - extra: true - name: meh + - {name: bar, extra: false} + - {name: baz, path: /baz} + - {name: foo, extra: true, path: /foo} + - {name: meh, extra: true} + +Single list +""""""""""" +It is possible to merge single list: + +.. code-block:: yaml+jinja + + list3: "{{ [list1 + list2, []] | + community.general.lists_mergeby('name') }}" + +This produces the same result as in the previous example: + +.. code-block:: yaml + + list3: + - {name: bar, extra: false} + - {name: baz, path: /baz} + - {name: foo, extra: true, path: /foo} + - {name: meh, extra: true} -The filter also accepts two optional parameters: ``recursive`` and ``list_merge``. These parameters are only supported when used with ansible-base 2.10 or ansible-core, but not with Ansible 2.9. This is available since community.general 4.4.0. +The filter also accepts two optional parameters: :ansopt:`community.general.lists_mergeby#filter:recursive` and :ansopt:`community.general.lists_mergeby#filter:list_merge`. This is available since community.general 4.4.0. 
**recursive** - Is a boolean, default to ``False``. Should the ``community.general.lists_mergeby`` recursively merge nested hashes. Note: It does not depend on the value of the ``hash_behaviour`` setting in ``ansible.cfg``. + Is a boolean, defaults to ``false``. Should the :ansplugin:`community.general.lists_mergeby#filter` filter recursively merge nested hashes. Note: It does not depend on the value of the ``hash_behaviour`` setting in ``ansible.cfg``. **list_merge** - Is a string, its possible values are ``replace`` (default), ``keep``, ``append``, ``prepend``, ``append_rp`` or ``prepend_rp``. It modifies the behaviour of ``community.general.lists_mergeby`` when the hashes to merge contain arrays/lists. + Is a string, its possible values are :ansval:`replace` (default), :ansval:`keep`, :ansval:`append`, :ansval:`prepend`, :ansval:`append_rp` or :ansval:`prepend_rp`. It modifies the behaviour of :ansplugin:`community.general.lists_mergeby#filter` when the hashes to merge contain arrays/lists. -The examples below set ``recursive=true`` and display the differences among all six options of ``list_merge``. Functionality of the parameters is exactly the same as in the filter ``combine``. See :ref:`Combining hashes/dictionaries ` to learn details about these options. +The examples below set :ansopt:`community.general.lists_mergeby#filter:recursive=true` and display the differences among all six options of :ansopt:`community.general.lists_mergeby#filter:list_merge`. Functionality of the parameters is exactly the same as in the filter :ansplugin:`ansible.builtin.combine#filter`. See :ref:`Combining hashes/dictionaries ` to learn details about these options. 
Let us use the lists below in the following examples @@ -95,8 +103,7 @@ Let us use the lists below in the following examples param01: x: default_value y: default_value - list: - - default_value + list: [default_value] - name: myname02 param01: [1, 1, 2, 3] @@ -105,16 +112,17 @@ Let us use the lists below in the following examples param01: y: patch_value z: patch_value - list: - - patch_value + list: [patch_value] - name: myname02 - param01: [3, 4, 4, {key: value}] + param01: [3, 4, 4] -Example ``list_merge=replace`` (default): +list_merge=replace (default) +"""""""""""""""""""""""""""" +Example :ansopt:`community.general.lists_mergeby#filter:list_merge=replace` (default): .. code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true) }}" @@ -123,25 +131,22 @@ This produces: .. code-block:: yaml list3: - - name: myname01 - param01: - list: - - patch_value - x: default_value - y: patch_value - z: patch_value - - name: myname02 - param01: - - 3 - - 4 - - 4 - - key: value + - name: myname01 + param01: + x: default_value + y: patch_value + list: [patch_value] + z: patch_value + - name: myname02 + param01: [3, 4, 4] -Example ``list_merge=keep``: +list_merge=keep +""""""""""""""" +Example :ansopt:`community.general.lists_mergeby#filter:list_merge=keep`: .. code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='keep') }}" @@ -151,25 +156,22 @@ This produces: .. 
code-block:: yaml list3: - - name: myname01 - param01: - list: - - default_value - x: default_value - y: patch_value - z: patch_value - - name: myname02 - param01: - - 1 - - 1 - - 2 - - 3 + - name: myname01 + param01: + x: default_value + y: patch_value + list: [default_value] + z: patch_value + - name: myname02 + param01: [1, 1, 2, 3] -Example ``list_merge=append``: +list_merge=append +""""""""""""""""" +Example :ansopt:`community.general.lists_mergeby#filter:list_merge=append`: .. code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='append') }}" @@ -179,30 +181,22 @@ This produces: .. code-block:: yaml list3: - - name: myname01 - param01: - list: - - default_value - - patch_value - x: default_value - y: patch_value - z: patch_value - - name: myname02 - param01: - - 1 - - 1 - - 2 - - 3 - - 3 - - 4 - - 4 - - key: value + - name: myname01 + param01: + x: default_value + y: patch_value + list: [default_value, patch_value] + z: patch_value + - name: myname02 + param01: [1, 1, 2, 3, 3, 4, 4] -Example ``list_merge=prepend``: +list_merge=prepend +"""""""""""""""""" +Example :ansopt:`community.general.lists_mergeby#filter:list_merge=prepend`: .. code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='prepend') }}" @@ -212,30 +206,22 @@ This produces: .. 
code-block:: yaml list3: - - name: myname01 - param01: - list: - - patch_value - - default_value - x: default_value - y: patch_value - z: patch_value - - name: myname02 - param01: - - 3 - - 4 - - 4 - - key: value - - 1 - - 1 - - 2 - - 3 + - name: myname01 + param01: + x: default_value + y: patch_value + list: [patch_value, default_value] + z: patch_value + - name: myname02 + param01: [3, 4, 4, 1, 1, 2, 3] -Example ``list_merge=append_rp``: +list_merge=append_rp +"""""""""""""""""""" +Example :ansopt:`community.general.lists_mergeby#filter:list_merge=append_rp`: .. code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='append_rp') }}" @@ -245,29 +231,22 @@ This produces: .. code-block:: yaml list3: - - name: myname01 - param01: - list: - - default_value - - patch_value - x: default_value - y: patch_value - z: patch_value - - name: myname02 - param01: - - 1 - - 1 - - 2 - - 3 - - 4 - - 4 - - key: value + - name: myname01 + param01: + x: default_value + y: patch_value + list: [default_value, patch_value] + z: patch_value + - name: myname02 + param01: [1, 1, 2, 3, 4, 4] -Example ``list_merge=prepend_rp``: +list_merge=prepend_rp +""""""""""""""""""""" +Example :ansopt:`community.general.lists_mergeby#filter:list_merge=prepend_rp`: .. code-block:: yaml+jinja - list3: "{{ [list1, list2]| + list3: "{{ [list1, list2] | community.general.lists_mergeby('name', recursive=true, list_merge='prepend_rp') }}" @@ -277,21 +256,12 @@ This produces: .. 
code-block:: yaml list3: - - name: myname01 - param01: - list: - - patch_value - - default_value - x: default_value - y: patch_value - z: patch_value - - name: myname02 - param01: - - 3 - - 4 - - 4 - - key: value - - 1 - - 1 - - 2 + - name: myname01 + param01: + x: default_value + y: patch_value + list: [patch_value, default_value] + z: patch_value + - name: myname02 + param01: [3, 4, 4, 1, 1, 2] diff --git a/docs/docsite/rst/filter_guide_conversions.rst b/docs/docsite/rst/filter_guide_conversions.rst index 78970c17b9..ca0401762c 100644 --- a/docs/docsite/rst/filter_guide_conversions.rst +++ b/docs/docsite/rst/filter_guide_conversions.rst @@ -9,7 +9,7 @@ Conversions Parsing CSV files ^^^^^^^^^^^^^^^^^ -Ansible offers the :ref:`community.general.read_csv module ` to read CSV files. Sometimes you need to convert strings to CSV files instead. For this, the ``from_csv`` filter exists. +Ansible offers the :ansplugin:`community.general.read_csv module ` to read CSV files. Sometimes you need to convert strings to CSV files instead. For this, the :ansplugin:`community.general.from_csv filter ` exists. .. code-block:: yaml+jinja @@ -42,7 +42,7 @@ This produces: ] } -The ``from_csv`` filter has several keyword arguments to control its behavior: +The :ansplugin:`community.general.from_csv filter ` has several keyword arguments to control its behavior: :dialect: Dialect of the CSV file. Default is ``excel``. Other possible choices are ``excel-tab`` and ``unix``. If one of ``delimiter``, ``skipinitialspace`` or ``strict`` is specified, ``dialect`` is ignored. :fieldnames: A set of column names to use. If not provided, the first line of the CSV is assumed to contain the column names. @@ -55,7 +55,7 @@ The ``from_csv`` filter has several keyword arguments to control its behavior: Converting to JSON ^^^^^^^^^^^^^^^^^^ -`JC `_ is a CLI tool and Python library which allows to interpret output of various CLI programs as JSON. It is also available as a filter in community.general. 
This filter needs the `jc Python library `_ installed on the controller. +`JC `_ is a CLI tool and Python library which allows to interpret output of various CLI programs as JSON. It is also available as a filter in community.general, called :ansplugin:`community.general.jc#filter`. This filter needs the `jc Python library `_ installed on the controller. .. code-block:: yaml+jinja diff --git a/docs/docsite/rst/filter_guide_creating_identifiers.rst b/docs/docsite/rst/filter_guide_creating_identifiers.rst index af0a8b7bab..6e0c730c60 100644 --- a/docs/docsite/rst/filter_guide_creating_identifiers.rst +++ b/docs/docsite/rst/filter_guide_creating_identifiers.rst @@ -11,7 +11,7 @@ The following filters allow to create identifiers. Hashids ^^^^^^^ -`Hashids `_ allow to convert sequences of integers to short unique string identifiers. This filter needs the `hashids Python library `_ installed on the controller. +`Hashids `_ allow to convert sequences of integers to short unique string identifiers. The :ansplugin:`community.general.hashids_encode#filter` and :ansplugin:`community.general.hashids_decode#filter` filters need the `hashids Python library `_ installed on the controller. .. code-block:: yaml+jinja @@ -52,7 +52,7 @@ The hashids filters accept keyword arguments to allow fine-tuning the hashids ge Random MACs ^^^^^^^^^^^ -You can use the ``random_mac`` filter to complete a partial `MAC address `_ to a random 6-byte MAC address. +You can use the :ansplugin:`community.general.random_mac filter ` to complete a partial `MAC address `_ to a random 6-byte MAC address. .. code-block:: yaml+jinja diff --git a/docs/docsite/rst/filter_guide_paths.rst b/docs/docsite/rst/filter_guide_paths.rst index dac8931454..41185832f2 100644 --- a/docs/docsite/rst/filter_guide_paths.rst +++ b/docs/docsite/rst/filter_guide_paths.rst @@ -6,14 +6,4 @@ Paths ----- -The ``path_join`` filter has been added in ansible-base 2.10. 
If you want to use this filter, but also need to support Ansible 2.9, you can use ``community.general``'s ``path_join`` shim, ``community.general.path_join``. This filter redirects to ``path_join`` for ansible-base 2.10 and ansible-core 2.11 or newer, and re-implements the filter for Ansible 2.9. - -.. code-block:: yaml+jinja - - # ansible-base 2.10 or newer: - path: {{ ('/etc', path, 'subdir', file) | path_join }} - - # Also works with Ansible 2.9: - path: {{ ('/etc', path, 'subdir', file) | community.general.path_join }} - -.. versionadded:: 3.0.0 +The :ansplugin:`ansible.builtin.path_join filter ` has been added in ansible-base 2.10. Community.general 3.0.0 and newer contains an alias ``community.general.path_join`` for this filter that could be used on Ansible 2.9 as well. Since community.general no longer supports Ansible 2.9, this is now a simple redirect to :ansplugin:`ansible.builtin.path_join filter `. diff --git a/docs/docsite/rst/filter_guide_selecting_json_data.rst b/docs/docsite/rst/filter_guide_selecting_json_data.rst index d8de07b926..bdf2624f3c 100644 --- a/docs/docsite/rst/filter_guide_selecting_json_data.rst +++ b/docs/docsite/rst/filter_guide_selecting_json_data.rst @@ -8,7 +8,7 @@ Selecting JSON data: JSON queries --------------------------------- -To select a single element or a data subset from a complex data structure in JSON format (for example, Ansible facts), use the ``json_query`` filter. The ``json_query`` filter lets you query a complex JSON structure and iterate over it using a loop structure. +To select a single element or a data subset from a complex data structure in JSON format (for example, Ansible facts), use the :ansplugin:`community.general.json_query filter `. The :ansplugin:`community.general.json_query#filter` filter lets you query a complex JSON structure and iterate over it using a loop structure. .. note:: You must manually install the **jmespath** dependency on the Ansible controller before using this filter. 
This filter is built upon **jmespath**, and you can use the same syntax. For examples, see `jmespath examples `_. @@ -17,50 +17,50 @@ Consider this data structure: .. code-block:: yaml+jinja { - "domain_definition": { - "domain": { - "cluster": [ - { - "name": "cluster1" - }, - { - "name": "cluster2" - } - ], - "server": [ - { - "name": "server11", - "cluster": "cluster1", - "port": "8080" - }, - { - "name": "server12", - "cluster": "cluster1", - "port": "8090" - }, - { - "name": "server21", - "cluster": "cluster2", - "port": "9080" - }, - { - "name": "server22", - "cluster": "cluster2", - "port": "9090" - } - ], - "library": [ - { - "name": "lib1", - "target": "cluster1" - }, - { - "name": "lib2", - "target": "cluster2" - } - ] + "domain_definition": { + "domain": { + "cluster": [ + { + "name": "cluster1" + }, + { + "name": "cluster2" } + ], + "server": [ + { + "name": "server11", + "cluster": "cluster1", + "port": "8080" + }, + { + "name": "server12", + "cluster": "cluster1", + "port": "8090" + }, + { + "name": "server21", + "cluster": "cluster2", + "port": "9080" + }, + { + "name": "server22", + "cluster": "cluster2", + "port": "9090" + } + ], + "library": [ + { + "name": "lib1", + "target": "cluster1" + }, + { + "name": "lib2", + "target": "cluster2" + } + ] } + } } To extract all clusters from this structure, you can use the following query: @@ -124,7 +124,7 @@ To get a hash map with all ports and names of a cluster: var: item loop: "{{ domain_definition | community.general.json_query(server_name_cluster1_query) }}" vars: - server_name_cluster1_query: "domain.server[?cluster=='cluster2'].{name: name, port: port}" + server_name_cluster1_query: "domain.server[?cluster=='cluster1'].{name: name, port: port}" To extract ports from all clusters with name starting with 'server1': @@ -146,4 +146,4 @@ To extract ports from all clusters with name containing 'server1': vars: server_name_query: "domain.server[?contains(name,'server1')].port" -.. 
note:: while using ``starts_with`` and ``contains``, you have to use `` to_json | from_json `` filter for correct parsing of data structure. +.. note:: while using ``starts_with`` and ``contains``, you have to use ``to_json | from_json`` filter for correct parsing of data structure. diff --git a/docs/docsite/rst/filter_guide_working_with_times.rst b/docs/docsite/rst/filter_guide_working_with_times.rst index dc68f2a2e3..032d44bb57 100644 --- a/docs/docsite/rst/filter_guide_working_with_times.rst +++ b/docs/docsite/rst/filter_guide_working_with_times.rst @@ -6,9 +6,9 @@ Working with times ------------------ -The ``to_time_unit`` filter allows to convert times from a human-readable string to a unit. For example, ``'4h 30min 12second' | community.general.to_time_unit('hour')`` gives the number of hours that correspond to 4 hours, 30 minutes and 12 seconds. +The :ansplugin:`community.general.to_time_unit filter ` allows to convert times from a human-readable string to a unit. For example, ``'4h 30min 12second' | community.general.to_time_unit('hour')`` gives the number of hours that correspond to 4 hours, 30 minutes and 12 seconds. -There are shorthands to directly convert to various units, like ``to_hours``, ``to_minutes``, ``to_seconds``, and so on. The following table lists all units that can be used: +There are shorthands to directly convert to various units, like :ansplugin:`community.general.to_hours#filter`, :ansplugin:`community.general.to_minutes#filter`, :ansplugin:`community.general.to_seconds#filter`, and so on. The following table lists all units that can be used: .. 
list-table:: Units :widths: 25 25 25 25 @@ -21,37 +21,37 @@ There are shorthands to directly convert to various units, like ``to_hours``, `` * - Millisecond - 1/1000 second - ``ms``, ``millisecond``, ``milliseconds``, ``msec``, ``msecs``, ``msecond``, ``mseconds`` - - ``to_milliseconds`` + - :ansplugin:`community.general.to_milliseconds#filter` * - Second - 1 second - ``s``, ``sec``, ``secs``, ``second``, ``seconds`` - - ``to_seconds`` + - :ansplugin:`community.general.to_seconds#filter` * - Minute - 60 seconds - ``m``, ``min``, ``mins``, ``minute``, ``minutes`` - - ``to_minutes`` + - :ansplugin:`community.general.to_minutes#filter` * - Hour - 60*60 seconds - ``h``, ``hour``, ``hours`` - - ``to_hours`` + - :ansplugin:`community.general.to_hours#filter` * - Day - 24*60*60 seconds - ``d``, ``day``, ``days`` - - ``to_days`` + - :ansplugin:`community.general.to_days#filter` * - Week - 7*24*60*60 seconds - ``w``, ``week``, ``weeks`` - - ``to_weeks`` + - :ansplugin:`community.general.to_weeks#filter` * - Month - 30*24*60*60 seconds - ``mo``, ``month``, ``months`` - - ``to_months`` + - :ansplugin:`community.general.to_months#filter` * - Year - 365*24*60*60 seconds - ``y``, ``year``, ``years`` - - ``to_years`` + - :ansplugin:`community.general.to_years#filter` -Note that months and years are using a simplified representation: a month is 30 days, and a year is 365 days. If you need different definitions of months or years, you can pass them as keyword arguments. For example, if you want a year to be 365.25 days, and a month to be 30.5 days, you can write ``'11months 4' | community.general.to_years(year=365.25, month=30.5)``. These keyword arguments can be specified to ``to_time_unit`` and to all shorthand filters. +Note that months and years are using a simplified representation: a month is 30 days, and a year is 365 days. If you need different definitions of months or years, you can pass them as keyword arguments. 
For example, if you want a year to be 365.25 days, and a month to be 30.5 days, you can write ``'11months 4' | community.general.to_years(year=365.25, month=30.5)``. These keyword arguments can be specified to :ansplugin:`community.general.to_time_unit#filter` and to all shorthand filters. .. code-block:: yaml+jinja diff --git a/docs/docsite/rst/filter_guide_working_with_unicode.rst b/docs/docsite/rst/filter_guide_working_with_unicode.rst index 2e5a67f8fa..e75b0f871b 100644 --- a/docs/docsite/rst/filter_guide_working_with_unicode.rst +++ b/docs/docsite/rst/filter_guide_working_with_unicode.rst @@ -6,9 +6,9 @@ Working with Unicode --------------------- -`Unicode `_ makes it possible to produce two strings which may be visually equivalent, but are comprised of distinctly different characters/character sequences. To address this ``Unicode`` defines `normalization forms `_ which avoid these distinctions by choosing a unique character sequence for a given visual representation. +`Unicode `_ makes it possible to produce two strings which may be visually equivalent, but are comprised of distinctly different characters/character sequences. To address this Unicode defines `normalization forms `_ which avoid these distinctions by choosing a unique character sequence for a given visual representation. -You can use the ``community.general.unicode_normalize`` filter to normalize ``Unicode`` strings within your playbooks. +You can use the :ansplugin:`community.general.unicode_normalize filter ` to normalize Unicode strings within your playbooks. .. code-block:: yaml+jinja @@ -28,7 +28,7 @@ This produces: "msg": true } -The ``community.general.unicode_normalize`` filter accepts a keyword argument to select the ``Unicode`` form used to normalize the input string. +The :ansplugin:`community.general.unicode_normalize filter ` accepts a keyword argument :ansopt:`community.general.unicode_normalize#filter:form` to select the Unicode form used to normalize the input string. 
:form: One of ``'NFC'`` (default), ``'NFD'``, ``'NFKC'``, or ``'NFKD'``. See the `Unicode reference `_ for more information. diff --git a/docs/docsite/rst/filter_guide_working_with_versions.rst b/docs/docsite/rst/filter_guide_working_with_versions.rst index 2488427b73..055bbcd217 100644 --- a/docs/docsite/rst/filter_guide_working_with_versions.rst +++ b/docs/docsite/rst/filter_guide_working_with_versions.rst @@ -6,7 +6,7 @@ Working with versions --------------------- -If you need to sort a list of version numbers, the Jinja ``sort`` filter is problematic. Since it sorts lexicographically, ``2.10`` will come before ``2.9``. To treat version numbers correctly, you can use the ``version_sort`` filter: +If you need to sort a list of version numbers, the Jinja ``sort`` filter is problematic. Since it sorts lexicographically, ``2.10`` will come before ``2.9``. To treat version numbers correctly, you can use the :ansplugin:`community.general.version_sort filter `: .. code-block:: yaml+jinja diff --git a/docs/docsite/rst/guide_alicloud.rst b/docs/docsite/rst/guide_alicloud.rst new file mode 100644 index 0000000000..b5ce2c063c --- /dev/null +++ b/docs/docsite/rst/guide_alicloud.rst @@ -0,0 +1,96 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_alicloud: + +Alibaba Cloud Compute Services Guide +==================================== + +Introduction +```````````` + +The community.general collection contains several modules for controlling and managing Alibaba Cloud Compute Services (Alicloud). This guide +explains how to use the Alicloud Ansible modules together. + +All Alicloud modules require ``footmark`` - install it on your control machine with ``pip install footmark``. 
+ +Cloud modules, including Alicloud modules, are usually executed on your local machine (the control machine) with ``connection: local``, rather than on remote machines defined in your hosts. + +Normally, you'll use the following pattern for plays that provision Alicloud resources: + +.. code-block:: yaml + + - hosts: localhost + connection: local + vars: + - ... + tasks: + - ... + +Authentication +`````````````` + +You can specify your Alicloud authentication credentials (access key and secret key) by passing them as +environment variables or by storing them in a vars file. + +To pass authentication credentials as environment variables: + +.. code-block:: console + + export ALICLOUD_ACCESS_KEY='Alicloud123' + export ALICLOUD_SECRET_KEY='AlicloudSecret123' + +To store authentication credentials in a vars file, encrypt them with :ref:`Ansible Vault ` to keep them secure, then list them: + +.. code-block:: yaml + + --- + alicloud_access_key: "--REMOVED--" + alicloud_secret_key: "--REMOVED--" + +Note that if you store your credentials in a vars file, you need to refer to them in each Alicloud module. For example: + +.. code-block:: yaml+jinja + + - community.general.ali_instance: + alicloud_access_key: "{{ alicloud_access_key }}" + alicloud_secret_key: "{{ alicloud_secret_key }}" + image_id: "..." + +Provisioning +```````````` + +Alicloud modules create Alicloud ECS instances (:ansplugin:`community.general.ali_instance#module`) and retrieve information on these (:ansplugin:`community.general.ali_instance_info#module`). + +You can use the ``count`` parameter to control the number of resources you create or terminate. For example, if you want exactly 5 instances tagged ``NewECS``, set the ``count`` of instances to 5 and the ``count_tag`` to ``NewECS``, as shown in the last task of the example playbook below. If there are no instances with the tag ``NewECS``, the task creates 5 new instances. If there are 2 instances with that tag, the task creates 3 more. 
If there are 8 instances with that tag, the task terminates 3 of those instances. + +If you do not specify a ``count_tag``, the task creates the number of instances you specify in ``count`` with the ``instance_name`` you provide. + +.. code-block:: yaml+jinja + + # alicloud_setup.yml + + - hosts: localhost + connection: local + + tasks: + - name: Create a set of instances + community.general.ali_instance: + instance_type: ecs.n4.small + image_id: "{{ ami_id }}" + instance_name: "My-new-instance" + instance_tags: + Name: NewECS + Version: 0.0.1 + count: 5 + count_tag: + Name: NewECS + allocate_public_ip: true + max_bandwidth_out: 50 + register: create_instance + +In the example playbook above, data about the instances created by this playbook is saved in the variable defined by the ``register`` keyword in the task. + +Each Alicloud module offers a variety of parameter options. Not all options are demonstrated in the above example. See each individual module for further details and examples. diff --git a/docs/docsite/rst/guide_cmdrunner.rst b/docs/docsite/rst/guide_cmdrunner.rst new file mode 100644 index 0000000000..c1514ee340 --- /dev/null +++ b/docs/docsite/rst/guide_cmdrunner.rst @@ -0,0 +1,529 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_cmdrunner: + + +Command Runner guide +==================== + + +Introduction +^^^^^^^^^^^^ + +The ``ansible_collections.community.general.plugins.module_utils.cmd_runner`` module util provides the +``CmdRunner`` class to help execute external commands. The class is a wrapper around +the standard ``AnsibleModule.run_command()`` method, handling command arguments, localization setting, +output processing output, check mode, and other features. 
+ +It is even more useful when one command is used in multiple modules, so that you can define all options +in a module util file, and each module uses the same runner with different arguments. + +For the sake of clarity, throughout this guide, unless otherwise specified, we use the term *option* when referring to +Ansible module options, and the term *argument* when referring to the command line arguments for the external command. + + +Quickstart +"""""""""" + +``CmdRunner`` defines a command and a set of coded instructions on how to format +the command-line arguments, in which specific order, for a particular execution. +It relies on ``ansible.module_utils.basic.AnsibleModule.run_command()`` to actually execute the command. +There are other features, see more details throughout this document. + +To use ``CmdRunner`` you must start by creating an object. The example below is a simplified +version of the actual code in :ansplugin:`community.general.ansible_galaxy_install#module`: + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + runner = CmdRunner( + module, + command="ansible-galaxy", + arg_formats=dict( + type=cmd_runner_fmt.as_func(lambda v: [] if v == 'both' else [v]), + galaxy_cmd=cmd_runner_fmt.as_list(), + upgrade=cmd_runner_fmt.as_bool("--upgrade"), + requirements_file=cmd_runner_fmt.as_opt_val('-r'), + dest=cmd_runner_fmt.as_opt_val('-p'), + force=cmd_runner_fmt.as_bool("--force"), + no_deps=cmd_runner_fmt.as_bool("--no-deps"), + version=cmd_runner_fmt.as_fixed("--version"), + name=cmd_runner_fmt.as_list(), + ) + ) + +This is meant to be done once, then every time you need to execute the command you create a context and pass values as needed: + +.. 
code-block:: python + + # Run the command with these arguments, when values exist for them + with runner("type galaxy_cmd upgrade force no_deps dest requirements_file name", output_process=process) as ctx: + ctx.run(galaxy_cmd="install", upgrade=upgrade) + + # version is fixed, requires no value + with runner("version") as ctx: + dummy, stdout, dummy = ctx.run() + + # passes arg 'data' to AnsibleModule.run_command() + with runner("type name", data=stdin_data) as ctx: + dummy, stdout, dummy = ctx.run() + + # Another way of expressing it + dummy, stdout, dummy = runner("version").run() + +Note that you can pass values for the arguments when calling ``run()``, otherwise ``CmdRunner`` +uses the module options with the exact same names to provide values for the runner arguments. +If no value is passed and no module option is found for the name specified, then an exception is raised, unless +the argument is using ``cmd_runner_fmt.as_fixed`` as format function like the ``version`` in the example above. +See more about it below. + +In the first example, values of ``type``, ``force``, ``no_deps`` and others +are taken straight from the module, whilst ``galaxy_cmd`` and ``upgrade`` are +passed explicitly. + +.. note:: + + It is not possible to automatically retrieve values of suboptions. + +That generates a resulting command line similar to (example taken from the +output of an integration test): + +.. code-block:: python + + [ + "/bin/ansible-galaxy", + "collection", + "install", + "--upgrade", + "-p", + "", + "netbox.netbox", + ] + + +Argument formats +^^^^^^^^^^^^^^^^ + +As seen in the example, ``CmdRunner`` expects a parameter named ``arg_formats`` +defining how to format each CLI named argument. +An "argument format" is nothing but a function to transform the value of a variable +into something formatted for the command line. + + +Argument format function +"""""""""""""""""""""""" + +An ``arg_format`` function is defined in the form similar to: + +.. 
code-block:: python
+
+ def func(value):
+ return ["--some-param-name", value]
+
+The parameter ``value`` can be of any type - although there are convenience
+mechanisms to help handling sequence and mapping objects.
+
+The result is expected to be of the ``Sequence[str]`` type (most commonly
+``list[str]`` or ``tuple[str]``), otherwise it is considered to be a ``str``,
+and it is coerced into ``list[str]``.
+This resulting sequence of strings is added to the command line when that
+argument is actually used.
+
+For example, if ``func`` returns:
+
+- ``["nee", 2, "shruberries"]``, the command line adds arguments ``"nee" "2" "shruberries"``.
+- ``2 == 2``, the command line adds argument ``True``.
+- ``None``, the command line adds argument ``None``.
+- ``[]``, the command line adds no command line argument for that particular argument.
+
+
+Convenience format methods
+""""""""""""""""""""""""""
+
+In the same module as ``CmdRunner`` there is a class ``cmd_runner_fmt`` which
+provides a set of convenience methods that return format functions for common cases.
+In the first block of code in the `Quickstart`_ section you can see the importing of
+that class:
+
+.. code-block:: python
+
+ from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt
+
+The same example shows how to make use of some of them in the instantiation of the ``CmdRunner`` object.
+A description of each one of the convenience methods available and examples of how to use them is found below.
+In these descriptions ``value`` refers to the single parameter passed to the formatting function.
+
+- ``cmd_runner_fmt.as_list()``
+ This method does not receive any parameter; the function returns ``value`` as-is.
+ + - Creation: + ``cmd_runner_fmt.as_list()`` + - Examples: + +----------------------+---------------------+ + | Value | Outcome | + +======================+=====================+ + | ``["foo", "bar"]`` | ``["foo", "bar"]`` | + +----------------------+---------------------+ + | ``"foobar"`` | ``["foobar"]`` | + +----------------------+---------------------+ + +- ``cmd_runner_fmt.as_bool()`` + This method receives two different parameters: ``args_true`` and ``args_false``, latter being optional. + If the boolean evaluation of ``value`` is ``True``, the format function returns ``args_true``. + If the boolean evaluation is ``False``, then the function returns ``args_false`` if it was provided, or ``[]`` otherwise. + + - Creation (one arg): + ``cmd_runner_fmt.as_bool("--force")`` + - Examples: + +------------+--------------------+ + | Value | Outcome | + +============+====================+ + | ``True`` | ``["--force"]`` | + +------------+--------------------+ + | ``False`` | ``[]`` | + +------------+--------------------+ + - Creation (two args, ``None`` treated as ``False``): + ``cmd_runner_fmt.as_bool("--relax", "--dont-do-it")`` + - Examples: + +------------+----------------------+ + | Value | Outcome | + +============+======================+ + | ``True`` | ``["--relax"]`` | + +------------+----------------------+ + | ``False`` | ``["--dont-do-it"]`` | + +------------+----------------------+ + | | ``["--dont-do-it"]`` | + +------------+----------------------+ + - Creation (two args, ``None`` is ignored): + ``cmd_runner_fmt.as_bool("--relax", "--dont-do-it", ignore_none=True)`` + - Examples: + +------------+----------------------+ + | Value | Outcome | + +============+======================+ + | ``True`` | ``["--relax"]`` | + +------------+----------------------+ + | ``False`` | ``["--dont-do-it"]`` | + +------------+----------------------+ + | | ``[]`` | + +------------+----------------------+ + +- ``cmd_runner_fmt.as_bool_not()`` + This method receives one 
parameter, which is returned by the function when the boolean evaluation + of ``value`` is ``False``. + + - Creation: + ``cmd_runner_fmt.as_bool_not("--no-deps")`` + - Examples: + +-------------+---------------------+ + | Value | Outcome | + +=============+=====================+ + | ``True`` | ``[]`` | + +-------------+---------------------+ + | ``False`` | ``["--no-deps"]`` | + +-------------+---------------------+ + +- ``cmd_runner_fmt.as_optval()`` + This method receives one parameter ``arg``, the function returns the string concatenation + of ``arg`` and ``value``. + + - Creation: + ``cmd_runner_fmt.as_optval("-i")`` + - Examples: + +---------------+---------------------+ + | Value | Outcome | + +===============+=====================+ + | ``3`` | ``["-i3"]`` | + +---------------+---------------------+ + | ``foobar`` | ``["-ifoobar"]`` | + +---------------+---------------------+ + +- ``cmd_runner_fmt.as_opt_val()`` + This method receives one parameter ``arg``, the function returns ``[arg, value]``. + + - Creation: + ``cmd_runner_fmt.as_opt_val("--name")`` + - Examples: + +--------------+--------------------------+ + | Value | Outcome | + +==============+==========================+ + | ``abc`` | ``["--name", "abc"]`` | + +--------------+--------------------------+ + +- ``cmd_runner_fmt.as_opt_eq_val()`` + This method receives one parameter ``arg``, the function returns the string of the form + ``{arg}={value}``. + + - Creation: + ``cmd_runner_fmt.as_opt_eq_val("--num-cpus")`` + - Examples: + +------------+-------------------------+ + | Value | Outcome | + +============+=========================+ + | ``10`` | ``["--num-cpus=10"]`` | + +------------+-------------------------+ + +- ``cmd_runner_fmt.as_fixed()`` + This method defines one or more fixed arguments that are returned by the generated function + regardless whether ``value`` is passed to it or not. 
+ + This method accepts these arguments in one of three forms: + + * one scalar parameter ``arg``, which will be returned as ``[arg]`` by the function, or + * one sequence parameter, such as a list, ``arg``, which will be returned by the function as ``arg[0]``, or + * multiple parameters ``args``, which will be returned as ``args`` directly by the function. + + See the examples below for each one of those forms. And, stressing that the generated function expects no ``value`` - if one + is provided then it is ignored. + + - Creation (one scalar argument): + * ``cmd_runner_fmt.as_fixed("--version")`` + - Examples: + +---------+--------------------------------------+ + | Value | Outcome | + +=========+======================================+ + | | * ``["--version"]`` | + +---------+--------------------------------------+ + | 57 | * ``["--version"]`` | + +---------+--------------------------------------+ + + - Creation (one sequence argument): + * ``cmd_runner_fmt.as_fixed(["--list", "--json"])`` + - Examples: + +---------+--------------------------------------+ + | Value | Outcome | + +=========+======================================+ + | | * ``["--list", "--json"]`` | + +---------+--------------------------------------+ + | True | * ``["--list", "--json"]`` | + +---------+--------------------------------------+ + + - Creation (multiple arguments): + * ``cmd_runner_fmt.as_fixed("--one", "--two", "--three")`` + - Examples: + +---------+--------------------------------------+ + | Value | Outcome | + +=========+======================================+ + | | * ``["--one", "--two", "--three"]`` | + +---------+--------------------------------------+ + | False | * ``["--one", "--two", "--three"]`` | + +---------+--------------------------------------+ + + - Note: + This is the only special case in which a value can be missing for the formatting function. + The first example here comes from the code in `Quickstart`_. 
+ In that case, the module has code to determine the command's version so that it can assert compatibility.
+ There is no *value* to be passed for that CLI argument.
+
+- ``cmd_runner_fmt.as_map()``
+ This method receives one parameter ``arg`` which must be a dictionary, and an optional parameter ``default``.
+ The function returns the evaluation of ``arg[value]``.
+ If ``value not in arg``, then it returns ``default`` if defined, otherwise ``[]``.
+
+ - Creation:
+ ``cmd_runner_fmt.as_map(dict(a=1, b=2, c=3), default=42)``
+ - Examples:
+ +---------------------+---------------+
+ | Value | Outcome |
+ +=====================+===============+
+ | ``"b"`` | ``["2"]`` |
+ +---------------------+---------------+
+ | ``"yabadabadoo"`` | ``["42"]`` |
+ +---------------------+---------------+
+
+ - Note:
+ If ``default`` is not specified, invalid values return an empty list, meaning they are silently ignored.
+
+- ``cmd_runner_fmt.as_func()``
+ This method receives one parameter ``arg`` which is itself a format function and it must abide by the rules described above.
+
+ - Creation:
+ ``cmd_runner_fmt.as_func(lambda v: [] if v == 'stable' else ['--channel', '{0}'.format(v)])``
+ - Note:
+ The outcome for that depends entirely on the function provided by the developer.
+
+
+Other features for argument formatting
+""""""""""""""""""""""""""""""""""""""
+
+Some additional features are available as decorators:
+
+- ``cmd_runner_fmt.unpack_args()``
+ This decorator unpacks the incoming ``value`` as a list of elements.
+
+ For example, in ``ansible_collections.community.general.plugins.module_utils.puppet``, it is used as:
+
+ .. code-block:: python
+
+ @cmd_runner_fmt.unpack_args
+ def execute_func(execute, manifest):
+ if execute:
+ return ["--execute", execute]
+ else:
+ return [manifest]
+
+ runner = CmdRunner(
+ module,
+ command=_prepare_base_cmd(),
+ path_prefix=_PUPPET_PATH_PREFIX,
+ arg_formats=dict(
+ # ...
+ _execute=cmd_runner_fmt.as_func(execute_func),
+ # ...
+ ), + ) + + Then, in :ansplugin:`community.general.puppet#module` it is put to use with: + + .. code-block:: python + + with runner(args_order) as ctx: + rc, stdout, stderr = ctx.run(_execute=[p['execute'], p['manifest']]) + +- ``cmd_runner_fmt.unpack_kwargs()`` + Conversely, this decorator unpacks the incoming ``value`` as a ``dict``-like object. + +- ``cmd_runner_fmt.stack()`` + This decorator assumes ``value`` is a sequence and concatenates the output + of the wrapped function applied to each element of the sequence. + + For example, in :ansplugin:`community.general.django_check#module`, the argument format for ``database`` + is defined as: + + .. code-block:: python + + arg_formats = dict( + # ... + database=cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val)("--database"), + # ... + ) + + When receiving a list ``["abc", "def"]``, the output is: + + .. code-block:: python + + ["--database", "abc", "--database", "def"] + + +Command Runner +^^^^^^^^^^^^^^ + +Settings that can be passed to the ``CmdRunner`` constructor are: + +- ``module: AnsibleModule`` + Module instance. Mandatory parameter. +- ``command: str | list[str]`` + Command to be executed. It can be a single string, the executable name, or a list + of strings containing the executable name as the first element and, optionally, fixed parameters. + Those parameters are used in all executions of the runner. + The *executable* pointed by this parameter (whether itself when ``str`` or its first element when ``list``) is + processed using ``AnsibleModule.get_bin_path()`` *unless* it is an absolute path or contains the character ``/``. +- ``arg_formats: dict`` + Mapping of argument names to formatting functions. +- ``default_args_order: str`` + As the name suggests, a default ordering for the arguments. When + this is passed, the context can be created without specifying ``args_order``. Defaults to ``()``. 
+- ``check_rc: bool`` + When ``True``, if the return code from the command is not zero, the module exits + with an error. Defaults to ``False``. +- ``path_prefix: list[str]`` + If the command being executed is installed in a non-standard directory path, + additional paths might be provided to search for the executable. Defaults to ``None``. +- ``environ_update: dict`` + Pass additional environment variables to be set during the command execution. + Defaults to ``None``. +- ``force_lang: str`` + It is usually important to force the locale to one specific value, so that responses are consistent and, therefore, parseable. + Please note that using this option (which is enabled by default) overwrites the environment variables ``LANGUAGE`` and ``LC_ALL``. + To disable this mechanism, set this parameter to ``None``. + In community.general 9.1.0 a special value ``auto`` was introduced for this parameter, with the effect + that ``CmdRunner`` then tries to determine the best parseable locale for the runtime. + It should become the default value in the future, but for the time being the default value is ``C``. + +When creating a context, the additional settings that can be passed to the call are: + +- ``args_order: str`` + Establishes the order in which the arguments are rendered in the command line. + This parameter is mandatory unless ``default_args_order`` was provided to the runner instance. +- ``output_process: func`` + Function to transform the output of the executable into different values or formats. + See examples in section below. +- ``check_mode_skip: bool`` + Whether to skip the actual execution of the command when the module is in check mode. + Defaults to ``False``. +- ``check_mode_return: any`` + If ``check_mode_skip=True``, then return this value instead. +- valid named arguments to ``AnsibleModule.run_command()`` + Other than ``args``, any valid argument to ``run_command()`` can be passed when setting up the run context. 
+ For example, ``data`` can be used to send information to the command's standard input.
+ Or ``cwd`` can be used to run the command inside a specific working directory.
+
+Additionally, any other valid parameters for ``AnsibleModule.run_command()`` may be passed, but unexpected behavior
+might occur if redefining options already present in the runner or its context creation. Use with caution.
+
+
+Processing results
+^^^^^^^^^^^^^^^^^^
+
+As mentioned, ``CmdRunner`` uses ``AnsibleModule.run_command()`` to execute the external command,
+and it passes the return value from that method back to the caller. That means that,
+by default, the result is going to be a tuple ``(rc, stdout, stderr)``.
+
+If you need to transform or process that output, you can pass a function to the context,
+as the ``output_process`` parameter. It must be a function like:
+
+.. code-block:: python
+
+ def process(rc, stdout, stderr):
+ # do some magic
+ return processed_value # whatever that is
+
+In that case, the return of ``run()`` is the ``processed_value`` returned by the function.
+
+
+PythonRunner
+^^^^^^^^^^^^
+
+The ``PythonRunner`` class is a specialized version of ``CmdRunner``, geared towards the execution of
+Python scripts. It features two extra and mutually exclusive parameters ``python`` and ``venv`` in its constructor:
+
+.. code-block:: python
+
+ from ansible_collections.community.general.plugins.module_utils.python_runner import PythonRunner
+ from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt
+
+ runner = PythonRunner(
+ module,
+ command=["-m", "django"],
+ arg_formats=dict(...),
+ python="python",
+ venv="/path/to/some/venv",
+ )
+
+The default value for ``python`` is the string ``python``, and for ``venv`` it is ``None``.
+
+The command line produced by such a command with ``python="python3.12"`` is something like:
+
+.. code-block:: shell
+
+ /usr/bin/python3.12 -m django ...
+ +And the command line for ``venv="/work/venv"`` is like: + +.. code-block:: shell + + /work/venv/bin/python -m django ... + +You may provide the value of the ``command`` argument as a string (in that case the string is used as a script name) +or as a list, in which case the elements of the list must be valid arguments for the Python interpreter, as in the example above. +See `Command line and environment `_ for more details. + +If the parameter ``python`` is an absolute path, or contains directory separators, such as ``/``, then it is used +as-is, otherwise the runtime ``PATH`` is searched for that command name. + +Other than that, everything else works as in ``CmdRunner``. + +.. versionadded:: 4.8.0 diff --git a/docs/docsite/rst/guide_deps.rst b/docs/docsite/rst/guide_deps.rst new file mode 100644 index 0000000000..1a44051ee4 --- /dev/null +++ b/docs/docsite/rst/guide_deps.rst @@ -0,0 +1,75 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_deps: + +``deps`` Guide +============== + + +Using ``deps`` +^^^^^^^^^^^^^^ + +The ``ansible_collections.community.general.plugins.module_utils.deps`` module util simplifies +the importing of code as described in :ref:`Importing and using shared code `. +Please notice that ``deps`` is meant to be used specifically with Ansible modules, and not other types of plugins. + +The same example from the Developer Guide would become: + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils import deps + + + with deps.declare("foo"): + import foo + +Then in ``main()``, just after the argspec (or anywhere in the code, for that matter), do + +.. 
code-block:: python
+
+ deps.validate(module) # assuming module is a valid AnsibleModule instance
+
+By default, ``deps`` will rely on ``ansible.module_utils.basic.missing_required_lib`` to generate
+a message about a failing import. That function accepts parameters ``reason`` and ``url``,
+and so does ``deps``:
+
+.. code-block:: python
+
+ with deps.declare("foo", reason="foo is needed to properly bar", url="https://foo.bar.io"):
+ import foo
+
+If you would rather write a custom message instead of using ``missing_required_lib`` then do:
+
+.. code-block:: python
+
+ with deps.declare("foo", msg="Custom msg explaining why foo is needed"):
+ import foo
+
+``deps`` allows for multiple dependencies to be declared:
+
+.. code-block:: python
+
+ with deps.declare("foo"):
+ import foo
+
+ with deps.declare("bar"):
+ import bar
+
+ with deps.declare("doe"):
+ import doe
+
+By default, ``deps.validate()`` will check on all the declared dependencies, but if so desired,
+they can be validated selectively by doing:
+
+.. code-block:: python
+
+ deps.validate(module, "foo") # only validates the "foo" dependency
+
+ deps.validate(module, "doe:bar") # only validates the "doe" and "bar" dependencies
+
+ deps.validate(module, "-doe:bar") # validates all dependencies except "doe" and "bar"
+
+.. versionadded:: 6.1.0
diff --git a/docs/docsite/helper/lists_mergeby/examples_all.rst.j2 b/docs/docsite/rst/guide_iocage.rst
similarity index 55%
rename from docs/docsite/helper/lists_mergeby/examples_all.rst.j2
rename to docs/docsite/rst/guide_iocage.rst
index 95a0fafddc..67eb0e8a99 100644
--- a/docs/docsite/helper/lists_mergeby/examples_all.rst.j2
+++ b/docs/docsite/rst/guide_iocage.rst
@@ -3,11 +3,13 @@
 GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
 SPDX-License-Identifier: GPL-3.0-or-later
 
-{% for i in examples %}
-{{ i.label }}
+.. _ansible_collections.community.general.docsite.guide_iocage:
 
-..
code-block:: {{ i.lang }} +************ +Iocage Guide +************ - {{ lookup('file', i.file)|indent(2) }} +.. toctree:: + :maxdepth: 1 -{% endfor %} + guide_iocage_inventory diff --git a/docs/docsite/rst/guide_iocage_inventory.rst b/docs/docsite/rst/guide_iocage_inventory.rst new file mode 100644 index 0000000000..4a410c35db --- /dev/null +++ b/docs/docsite/rst/guide_iocage_inventory.rst @@ -0,0 +1,31 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory: + +community.general.iocage inventory plugin +========================================= + +The inventory plugin :ansplugin:`community.general.iocage#inventory` gets the inventory hosts from the iocage jail manager. + +See: + +* `iocage - A FreeBSD Jail Manager `_ +* `man iocage `_ +* `Jails and Containers `_ + +.. note:: + The output of the examples is YAML formatted. See the option :ansopt:`ansible.builtin.default#callback:result_format`. + +.. toctree:: + :caption: Table of Contents + :maxdepth: 1 + + guide_iocage_inventory_basics + guide_iocage_inventory_dhcp + guide_iocage_inventory_hooks + guide_iocage_inventory_properties + guide_iocage_inventory_tags + guide_iocage_inventory_aliases diff --git a/docs/docsite/rst/guide_iocage_inventory_aliases.rst b/docs/docsite/rst/guide_iocage_inventory_aliases.rst new file mode 100644 index 0000000000..431403d733 --- /dev/null +++ b/docs/docsite/rst/guide_iocage_inventory_aliases.rst @@ -0,0 +1,200 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. 
_ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_aliases: + +Aliases +------- + +Quoting :ref:`inventory_aliases`: + + The ``inventory_hostname`` is the unique identifier for a host in Ansible, this can be an IP or a hostname, but also just an 'alias' or short name for the host. + +As root at the iocage host, stop and destroy all jails: + +.. code-block:: console + + shell> iocage stop ALL + * Stopping srv_1 + + Executing prestop OK + + Stopping services OK + + Tearing down VNET OK + + Removing devfs_ruleset: 1000 OK + + Removing jail process OK + + Executing poststop OK + * Stopping srv_2 + + Executing prestop OK + + Stopping services OK + + Tearing down VNET OK + + Removing devfs_ruleset: 1001 OK + + Removing jail process OK + + Executing poststop OK + * Stopping srv_3 + + Executing prestop OK + + Stopping services OK + + Tearing down VNET OK + + Removing devfs_ruleset: 1002 OK + + Removing jail process OK + + Executing poststop OK + ansible_client is not running! + + shell> iocage destroy -f srv_1 srv_2 srv_3 + Destroying srv_1 + Destroying srv_2 + Destroying srv_3 + +Create three VNET jails with a DHCP interface from the template *ansible_client*. Use the option ``--count``: + +.. code-block:: console + + shell> iocage create --short --template ansible_client --count 3 bpf=1 dhcp=1 vnet=1 + 1c11de2d successfully created! + 9d94cc9e successfully created! + 052b9557 successfully created! + +The names are random. Start the jails: + +.. code-block:: console + + shell> iocage start ALL + No default gateway found for ipv6. + * Starting 052b9557 + + Started OK + + Using devfs_ruleset: 1000 (iocage generated default) + + Configuring VNET OK + + Using IP options: vnet + + Starting services OK + + Executing poststart OK + + DHCP Address: 10.1.0.137/24 + No default gateway found for ipv6. 
+ * Starting 1c11de2d + + Started OK + + Using devfs_ruleset: 1001 (iocage generated default) + + Configuring VNET OK + + Using IP options: vnet + + Starting services OK + + Executing poststart OK + + DHCP Address: 10.1.0.146/24 + No default gateway found for ipv6. + * Starting 9d94cc9e + + Started OK + + Using devfs_ruleset: 1002 (iocage generated default) + + Configuring VNET OK + + Using IP options: vnet + + Starting services OK + + Executing poststart OK + + DHCP Address: 10.1.0.115/24 + Please convert back to a jail before trying to start ansible_client + +List the jails: + +.. code-block:: console + + shell> iocage list -l + +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL | + +=====+==========+======+=======+======+=================+====================+=====+================+==========+ + | 207 | 052b9557 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.137 | - | ansible_client | no | + +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | 208 | 1c11de2d | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.146 | - | ansible_client | no | + +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | 209 | 9d94cc9e | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.115 | - | ansible_client | no | + +-----+----------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + +Set notes. The tag *alias* will be used to create inventory aliases: + +.. 
code-block:: console + + shell> iocage set notes="vmm=iocage_02 project=foo alias=srv_1" 052b9557 + notes: none -> vmm=iocage_02 project=foo alias=srv_1 + shell> iocage set notes="vmm=iocage_02 project=foo alias=srv_2" 1c11de2d + notes: none -> vmm=iocage_02 project=foo alias=srv_2 + shell> iocage set notes="vmm=iocage_02 project=bar alias=srv_3" 9d94cc9e + notes: none -> vmm=iocage_02 project=bar alias=srv_3 + +Update the inventory configuration. Set the option +:ansopt:`community.general.iocage#inventory:inventory_hostname_tag` to :ansval:`alias`. This tag keeps the +value of the alias. The option :ansopt:`community.general.iocage#inventory:get_properties` must be +enabled. For example, ``hosts/02_iocage.yml`` contains: + +.. code-block:: yaml + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + get_properties: true + inventory_hostname_tag: alias + hooks_results: + - /var/db/dhclient-hook.address.epair0b + compose: + ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0) + iocage_tags: dict(iocage_properties.notes | split | map('split', '=')) + keyed_groups: + - prefix: vmm + key: iocage_tags.vmm + - prefix: project + key: iocage_tags.project + +Display tags and groups. Create a playbook ``pb-test-groups.yml`` with the following content: + +.. code-block:: yaml+jinja + + - hosts: all + remote_user: admin + + vars: + + ansible_python_interpreter: auto_silent + + tasks: + + - debug: + var: iocage_tags + + - debug: + msg: | + {% for group in groups %} + {{ group }}: {{ groups[group] }} + {% endfor %} + run_once: true + +Run the playbook: + +.. 
code-block:: console + + shell> ansible-playbook -i hosts/02_iocage.yml pb-test-groups.yml + + PLAY [all] ********************************************************************************************************** + + TASK [debug] ******************************************************************************************************** + ok: [srv_1] => + iocage_tags: + alias: srv_1 + project: foo + vmm: iocage_02 + ok: [srv_2] => + iocage_tags: + alias: srv_2 + project: foo + vmm: iocage_02 + ok: [srv_3] => + iocage_tags: + alias: srv_3 + project: bar + vmm: iocage_02 + + TASK [debug] ******************************************************************************************************** + ok: [srv_1] => + msg: |- + all: ['srv_1', 'srv_2', 'srv_3'] + ungrouped: [] + vmm_iocage_02: ['srv_1', 'srv_2', 'srv_3'] + project_foo: ['srv_1', 'srv_2'] + project_bar: ['srv_3'] + + PLAY RECAP ********************************************************************************************************** + srv_1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + srv_2 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + srv_3 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 diff --git a/docs/docsite/rst/guide_iocage_inventory_basics.rst b/docs/docsite/rst/guide_iocage_inventory_basics.rst new file mode 100644 index 0000000000..f198edc4f4 --- /dev/null +++ b/docs/docsite/rst/guide_iocage_inventory_basics.rst @@ -0,0 +1,128 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_basics: + +Basics +------ + +As root at the iocage host, create three VNET jails with a DHCP interface from the template +*ansible_client*: + +.. 
code-block:: console + + shell> iocage create --template ansible_client --name srv_1 bpf=1 dhcp=1 vnet=1 + srv_1 successfully created! + shell> iocage create --template ansible_client --name srv_2 bpf=1 dhcp=1 vnet=1 + srv_2 successfully created! + shell> iocage create --template ansible_client --name srv_3 bpf=1 dhcp=1 vnet=1 + srv_3 successfully created! + +See: `Configuring a VNET Jail `_. + +As admin at the controller, list the jails: + +.. code-block:: console + + shell> ssh admin@10.1.0.73 iocage list -l + +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL | + +======+=======+======+=======+======+=================+====================+=====+================+==========+ + | None | srv_1 | off | down | jail | 14.2-RELEASE-p3 | DHCP (not running) | - | ansible_client | no | + +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | None | srv_2 | off | down | jail | 14.2-RELEASE-p3 | DHCP (not running) | - | ansible_client | no | + +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | None | srv_3 | off | down | jail | 14.2-RELEASE-p3 | DHCP (not running) | - | ansible_client | no | + +------+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + +Create the inventory file ``hosts/02_iocage.yml`` + +.. code-block:: yaml + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + +Display the inventory: + +.. 
code-block:: console + + shell> ansible-inventory -i hosts/02_iocage.yml --list --yaml + all: + children: + ungrouped: + hosts: + srv_1: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_ip4: '-' + iocage_ip4_dict: + ip4: [] + msg: DHCP (not running) + iocage_ip6: '-' + iocage_jid: None + iocage_release: 14.2-RELEASE-p3 + iocage_state: down + iocage_template: ansible_client + iocage_type: jail + srv_2: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_ip4: '-' + iocage_ip4_dict: + ip4: [] + msg: DHCP (not running) + iocage_ip6: '-' + iocage_jid: None + iocage_release: 14.2-RELEASE-p3 + iocage_state: down + iocage_template: ansible_client + iocage_type: jail + srv_3: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_ip4: '-' + iocage_ip4_dict: + ip4: [] + msg: DHCP (not running) + iocage_ip6: '-' + iocage_jid: None + iocage_release: 14.2-RELEASE-p3 + iocage_state: down + iocage_template: ansible_client + iocage_type: jail + +Optionally, create shared IP jails: + +.. code-block:: console + + shell> iocage create --template ansible_client --name srv_1 ip4_addr="em0|10.1.0.101/24" + srv_1 successfully created! + shell> iocage create --template ansible_client --name srv_2 ip4_addr="em0|10.1.0.102/24" + srv_2 successfully created! + shell> iocage create --template ansible_client --name srv_3 ip4_addr="em0|10.1.0.103/24" + srv_3 successfully created! 
+ shell> iocage list -l + +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+ + | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL | + +======+=======+======+=======+======+=================+===================+=====+================+==========+ + | None | srv_1 | off | down | jail | 14.2-RELEASE-p3 | em0|10.1.0.101/24 | - | ansible_client | no | + +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+ + | None | srv_2 | off | down | jail | 14.2-RELEASE-p3 | em0|10.1.0.102/24 | - | ansible_client | no | + +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+ + | None | srv_3 | off | down | jail | 14.2-RELEASE-p3 | em0|10.1.0.103/24 | - | ansible_client | no | + +------+-------+------+-------+------+-----------------+-------------------+-----+----------------+----------+ + +See: `Configuring a Shared IP Jail `_ + +If iocage needs environment variable(s), use the option :ansopt:`community.general.iocage#inventory:env`. For example, + +.. code-block:: yaml + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + env: + CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1 diff --git a/docs/docsite/rst/guide_iocage_inventory_dhcp.rst b/docs/docsite/rst/guide_iocage_inventory_dhcp.rst new file mode 100644 index 0000000000..3c37366ca6 --- /dev/null +++ b/docs/docsite/rst/guide_iocage_inventory_dhcp.rst @@ -0,0 +1,175 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_dhcp: + +DHCP +---- + +As root at the iocage host, start the jails: + +.. code-block:: console + + shell> iocage start ALL + No default gateway found for ipv6. 
+ * Starting srv_1 + + Started OK + + Using devfs_ruleset: 1000 (iocage generated default) + + Configuring VNET OK + + Using IP options: vnet + + Starting services OK + + Executing poststart OK + + DHCP Address: 10.1.0.183/24 + No default gateway found for ipv6. + * Starting srv_2 + + Started OK + + Using devfs_ruleset: 1001 (iocage generated default) + + Configuring VNET OK + + Using IP options: vnet + + Starting services OK + + Executing poststart OK + + DHCP Address: 10.1.0.204/24 + No default gateway found for ipv6. + * Starting srv_3 + + Started OK + + Using devfs_ruleset: 1002 (iocage generated default) + + Configuring VNET OK + + Using IP options: vnet + + Starting services OK + + Executing poststart OK + + DHCP Address: 10.1.0.169/24 + Please convert back to a jail before trying to start ansible_client + +List the jails: + +.. code-block:: console + + shell> iocage list -l + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL | + +=====+=======+======+=======+======+=================+====================+=====+================+==========+ + | 204 | srv_1 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.183 | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | 205 | srv_2 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.204 | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | 206 | srv_3 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.169 | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + +As admin at the controller, list the jails. The IP4 tab says "... address requires root": + +.. 
code-block:: console + + shell> ssh admin@10.1.0.73 iocage list -l + +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+ + | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL | + +=====+=======+======+=======+======+=================+=========================================+=====+================+==========+ + | 204 | srv_1 | off | up | jail | 14.2-RELEASE-p3 | DHCP (running -- address requires root) | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+ + | 205 | srv_2 | off | up | jail | 14.2-RELEASE-p3 | DHCP (running -- address requires root) | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+ + | 206 | srv_3 | off | up | jail | 14.2-RELEASE-p3 | DHCP (running -- address requires root) | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+-----------------------------------------+-----+----------------+----------+ + +Use sudo if enabled: + +.. 
code-block:: console + + shell> ssh admin@10.1.0.73 sudo iocage list -l + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | JID | NAME | BOOT | STATE | TYPE | RELEASE | IP4 | IP6 | TEMPLATE | BASEJAIL | + +=====+=======+======+=======+======+=================+====================+=====+================+==========+ + | 204 | srv_1 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.183 | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | 205 | srv_2 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.204 | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + | 206 | srv_3 | off | up | jail | 14.2-RELEASE-p3 | epair0b|10.1.0.169 | - | ansible_client | no | + +-----+-------+------+-------+------+-----------------+--------------------+-----+----------------+----------+ + +Create the inventory file ``hosts/02_iocage.yml``. Use the option +:ansopt:`community.general.iocage#inventory:sudo`: + +.. code-block:: yaml + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + sudo: true + +Display the inventory: + +.. 
code-block:: console + + shell> ansible-inventory -i hosts/02_iocage.yml --list --yaml + all: + children: + ungrouped: + hosts: + srv_1: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_ip4: 10.1.0.183 + iocage_ip4_dict: + ip4: + - ifc: epair0b + ip: 10.1.0.183 + mask: '-' + msg: '' + iocage_ip6: '-' + iocage_jid: '204' + iocage_release: 14.2-RELEASE-p3 + iocage_state: up + iocage_template: ansible_client + iocage_type: jail + srv_2: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_ip4: 10.1.0.204 + iocage_ip4_dict: + ip4: + - ifc: epair0b + ip: 10.1.0.204 + mask: '-' + msg: '' + iocage_ip6: '-' + iocage_jid: '205' + iocage_release: 14.2-RELEASE-p3 + iocage_state: up + iocage_template: ansible_client + iocage_type: jail + srv_3: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_ip4: 10.1.0.169 + iocage_ip4_dict: + ip4: + - ifc: epair0b + ip: 10.1.0.169 + mask: '-' + msg: '' + iocage_ip6: '-' + iocage_jid: '206' + iocage_release: 14.2-RELEASE-p3 + iocage_state: up + iocage_template: ansible_client + iocage_type: jail + +Note: If the option :ansopt:`community.general.iocage#inventory:env` is used and :ansopt:`community.general.iocage#inventory:sudo` is enabled, enable also :ansopt:`community.general.iocage#inventory:sudo_preserve_env`. For example, + +.. code-block:: yaml + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + env: + CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1 + sudo: true + sudo_preserve_env: true + +In this case, make sure the sudo tag ``SETENV`` is used: + +.. code-block:: console + + shell> ssh admin@10.1.0.73 sudo cat /usr/local/etc/sudoers | grep admin + admin ALL=(ALL) NOPASSWD:SETENV: ALL diff --git a/docs/docsite/rst/guide_iocage_inventory_hooks.rst b/docs/docsite/rst/guide_iocage_inventory_hooks.rst new file mode 100644 index 0000000000..45364fc798 --- /dev/null +++ b/docs/docsite/rst/guide_iocage_inventory_hooks.rst @@ -0,0 +1,187 @@ +.. 
+ Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_hooks: + +Hooks +----- + +The iocage utility internally opens a console to a jail to get the jail's DHCP address. This +requires root. If you run the command ``iocage list -l`` as unprivileged user, you'll see the +message ``DHCP (running -- address requires root)``. If you are not granted the root privilege, use +``/etc/dhclient-exit-hooks``. For example, in the jail *srv_1*, create the file +``/zroot/iocage/jails/srv_1/root/etc/dhclient-exit-hooks`` + +.. code-block:: shell + + case "$reason" in + "BOUND"|"REBIND"|"REBOOT"|"RENEW") + echo $new_ip_address > /var/db/dhclient-hook.address.$interface + ;; + esac + +where ``/zroot/iocage`` is the activated pool. + +.. code-block:: console + + shell> zfs list | grep /zroot/iocage + zroot/iocage 4.69G 446G 5.08M /zroot/iocage + zroot/iocage/download 927M 446G 384K /zroot/iocage/download + zroot/iocage/download/14.1-RELEASE 465M 446G 465M /zroot/iocage/download/14.1-RELEASE + zroot/iocage/download/14.2-RELEASE 462M 446G 462M /zroot/iocage/download/14.2-RELEASE + zroot/iocage/images 384K 446G 384K /zroot/iocage/images + zroot/iocage/jails 189M 446G 480K /zroot/iocage/jails + zroot/iocage/jails/srv_1 62.9M 446G 464K /zroot/iocage/jails/srv_1 + zroot/iocage/jails/srv_1/root 62.4M 446G 3.53G /zroot/iocage/jails/srv_1/root + zroot/iocage/jails/srv_2 62.8M 446G 464K /zroot/iocage/jails/srv_2 + zroot/iocage/jails/srv_2/root 62.3M 446G 3.53G /zroot/iocage/jails/srv_2/root + zroot/iocage/jails/srv_3 62.8M 446G 464K /zroot/iocage/jails/srv_3 + zroot/iocage/jails/srv_3/root 62.3M 446G 3.53G /zroot/iocage/jails/srv_3/root + zroot/iocage/log 688K 446G 688K /zroot/iocage/log + zroot/iocage/releases 2.93G 446G 384K /zroot/iocage/releases 
+ zroot/iocage/releases/14.2-RELEASE 2.93G 446G 384K /zroot/iocage/releases/14.2-RELEASE
+ zroot/iocage/releases/14.2-RELEASE/root 2.93G 446G 2.88G /zroot/iocage/releases/14.2-RELEASE/root
+ zroot/iocage/templates 682M 446G 416K /zroot/iocage/templates
+ zroot/iocage/templates/ansible_client 681M 446G 432K /zroot/iocage/templates/ansible_client
+ zroot/iocage/templates/ansible_client/root 681M 446G 3.53G /zroot/iocage/templates/ansible_client/root
+
+See: `man dhclient-script <https://man.freebsd.org/cgi/man.cgi?query=dhclient-script>`_
+
+Create the inventory configuration. Use the option :ansopt:`community.general.iocage#inventory:hooks_results` instead of :ansopt:`community.general.iocage#inventory:sudo`:
+
+.. code-block:: console
+
+    shell> cat hosts/02_iocage.yml
+
+.. code-block:: yaml
+
+    plugin: community.general.iocage
+    host: 10.1.0.73
+    user: admin
+    hooks_results:
+      - /var/db/dhclient-hook.address.epair0b
+
+.. note::
+
+    The option :ansopt:`community.general.iocage#inventory:hooks_results` expects the poolname to be mounted to ``/poolname``. For example, if you
+    activate the pool iocage, this plugin expects to find the :ansopt:`community.general.iocage#inventory:hooks_results` items in the path
+    /iocage/iocage/jails/<name>/root. If you mount the poolname to a different path, the easiest
+    remedy is to create a symlink.
+
+As admin at the controller, display the inventory:
+
+.. 
code-block:: console + + shell> ansible-inventory -i hosts/02_iocage.yml --list --yaml + all: + children: + ungrouped: + hosts: + srv_1: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_hooks: + - 10.1.0.183 + iocage_ip4: '-' + iocage_ip4_dict: + ip4: [] + msg: DHCP (running -- address requires root) + iocage_ip6: '-' + iocage_jid: '204' + iocage_release: 14.2-RELEASE-p3 + iocage_state: up + iocage_template: ansible_client + iocage_type: jail + srv_2: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_hooks: + - 10.1.0.204 + iocage_ip4: '-' + iocage_ip4_dict: + ip4: [] + msg: DHCP (running -- address requires root) + iocage_ip6: '-' + iocage_jid: '205' + iocage_release: 14.2-RELEASE-p3 + iocage_state: up + iocage_template: ansible_client + iocage_type: jail + srv_3: + iocage_basejail: 'no' + iocage_boot: 'off' + iocage_hooks: + - 10.1.0.169 + iocage_ip4: '-' + iocage_ip4_dict: + ip4: [] + msg: DHCP (running -- address requires root) + iocage_ip6: '-' + iocage_jid: '206' + iocage_release: 14.2-RELEASE-p3 + iocage_state: up + iocage_template: ansible_client + iocage_type: jail + +Compose the variable ``ansible_host``. For example, ``hosts/02_iocage.yml`` could look like: + +.. code-block:: yaml+jinja + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + hooks_results: + - /var/db/dhclient-hook.address.epair0b + compose: + ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0) + +Test the jails. Create a playbook ``pb-test-uname.yml``: + +.. code-block:: yaml + + - hosts: all + remote_user: admin + + vars: + + ansible_python_interpreter: auto_silent + + tasks: + + - command: uname -a + register: out + + - debug: + var: out.stdout + +See: :ref:`working_with_bsd` + +Run the playbook: + +.. 
code-block:: console + + shell> ansible-playbook -i hosts/02_iocage.yml pb-test-uname.yml + + PLAY [all] ********************************************************************************************************** + + TASK [command] ****************************************************************************************************** + changed: [srv_3] + changed: [srv_1] + changed: [srv_2] + + TASK [debug] ******************************************************************************************************** + ok: [srv_1] => + out.stdout: FreeBSD srv-1 14.2-RELEASE-p1 FreeBSD 14.2-RELEASE-p1 GENERIC amd64 + ok: [srv_3] => + out.stdout: FreeBSD srv-3 14.2-RELEASE-p1 FreeBSD 14.2-RELEASE-p1 GENERIC amd64 + ok: [srv_2] => + out.stdout: FreeBSD srv-2 14.2-RELEASE-p1 FreeBSD 14.2-RELEASE-p1 GENERIC amd64 + + PLAY RECAP ********************************************************************************************************** + srv_1 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + srv_2 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + srv_3 : ok=2 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + +Note: This playbook and the inventory configuration works also for the *Shared IP Jails*. diff --git a/docs/docsite/rst/guide_iocage_inventory_properties.rst b/docs/docsite/rst/guide_iocage_inventory_properties.rst new file mode 100644 index 0000000000..d044f2e7f2 --- /dev/null +++ b/docs/docsite/rst/guide_iocage_inventory_properties.rst @@ -0,0 +1,201 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_properties: + +Properties +---------- + +Optionally, in the inventory file ``hosts/02_iocage.yml``, get the iocage properties. 
Enable +:ansopt:`community.general.iocage#inventory:get_properties`: + +.. code-block:: yaml+jinja + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + get_properties: true + hooks_results: + - /var/db/dhclient-hook.address.epair0b + compose: + ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0) + +Display the properties. Create the playbook ``pb-test-properties.yml``: + +.. code-block:: yaml + + - hosts: all + remote_user: admin + + vars: + + ansible_python_interpreter: auto_silent + + tasks: + + - debug: + var: iocage_properties + +Run the playbook. Limit the inventory to *srv_3*: + +.. code-block:: console + + shell> ansible-playbook -i hosts/02_iocage.yml -l srv_3 pb-test-properties.yml + + PLAY [all] ********************************************************************************************************** + + TASK [debug] ******************************************************************************************************** + ok: [srv_3] => + iocage_properties: + CONFIG_VERSION: '33' + allow_chflags: '0' + allow_mlock: '0' + allow_mount: '1' + allow_mount_devfs: '0' + allow_mount_fdescfs: '0' + allow_mount_fusefs: '0' + allow_mount_linprocfs: '0' + allow_mount_linsysfs: '0' + allow_mount_nullfs: '0' + allow_mount_procfs: '0' + allow_mount_tmpfs: '0' + allow_mount_zfs: '0' + allow_nfsd: '0' + allow_quotas: '0' + allow_raw_sockets: '0' + allow_set_hostname: '1' + allow_socket_af: '0' + allow_sysvipc: '0' + allow_tun: '0' + allow_vmm: '0' + assign_localhost: '0' + available: readonly + basejail: '0' + boot: '0' + bpf: '1' + children_max: '0' + cloned_release: 14.2-RELEASE + comment: none + compression: 'on' + compressratio: readonly + coredumpsize: 'off' + count: '1' + cpuset: 'off' + cputime: 'off' + datasize: 'off' + dedup: 'off' + defaultrouter: auto + defaultrouter6: auto + depends: none + devfs_ruleset: '4' + dhcp: '1' + enforce_statfs: '2' + exec_clean: '1' + exec_created: /usr/bin/true + exec_fib: '0' + 
exec_jail_user: root + exec_poststart: /usr/bin/true + exec_poststop: /usr/bin/true + exec_prestart: /usr/bin/true + exec_prestop: /usr/bin/true + exec_start: /bin/sh /etc/rc + exec_stop: /bin/sh /etc/rc.shutdown + exec_system_jail_user: '0' + exec_system_user: root + exec_timeout: '60' + host_domainname: none + host_hostname: srv-3 + host_hostuuid: srv_3 + host_time: '1' + hostid: ea2ba7d1-4fcd-f13f-82e4-8b32c0a03403 + hostid_strict_check: '0' + interfaces: vnet0:bridge0 + ip4: new + ip4_addr: none + ip4_saddrsel: '1' + ip6: new + ip6_addr: none + ip6_saddrsel: '1' + ip_hostname: '0' + jail_zfs: '0' + jail_zfs_dataset: iocage/jails/srv_3/data + jail_zfs_mountpoint: none + last_started: '2025-06-11 04:29:23' + localhost_ip: none + login_flags: -f root + mac_prefix: 02a098 + maxproc: 'off' + memorylocked: 'off' + memoryuse: 'off' + min_dyn_devfs_ruleset: '1000' + mount_devfs: '1' + mount_fdescfs: '1' + mount_linprocfs: '0' + mount_procfs: '0' + mountpoint: readonly + msgqqueued: 'off' + msgqsize: 'off' + nat: '0' + nat_backend: ipfw + nat_forwards: none + nat_interface: none + nat_prefix: '172.16' + nmsgq: 'off' + notes: none + nsem: 'off' + nsemop: 'off' + nshm: 'off' + nthr: 'off' + openfiles: 'off' + origin: readonly + owner: root + pcpu: 'off' + plugin_name: none + plugin_repository: none + priority: '99' + pseudoterminals: 'off' + quota: none + readbps: 'off' + readiops: 'off' + release: 14.2-RELEASE-p3 + reservation: none + resolver: /etc/resolv.conf + rlimits: 'off' + rtsold: '0' + securelevel: '2' + shmsize: 'off' + source_template: ansible_client + stacksize: 'off' + state: up + stop_timeout: '30' + swapuse: 'off' + sync_state: none + sync_target: none + sync_tgt_zpool: none + sysvmsg: new + sysvsem: new + sysvshm: new + template: '0' + type: jail + used: readonly + vmemoryuse: 'off' + vnet: '1' + vnet0_mac: 02a0983da05d 02a0983da05e + vnet0_mtu: auto + vnet1_mac: none + vnet1_mtu: auto + vnet2_mac: none + vnet2_mtu: auto + vnet3_mac: none + vnet3_mtu: auto 
+ vnet_default_interface: auto + vnet_default_mtu: '1500' + vnet_interfaces: none + wallclock: 'off' + writebps: 'off' + writeiops: 'off' + + PLAY RECAP ********************************************************************************************************** + srv_3 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 diff --git a/docs/docsite/rst/guide_iocage_inventory_tags.rst b/docs/docsite/rst/guide_iocage_inventory_tags.rst new file mode 100644 index 0000000000..8adf641073 --- /dev/null +++ b/docs/docsite/rst/guide_iocage_inventory_tags.rst @@ -0,0 +1,117 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_iocage.guide_iocage_inventory.guide_iocage_inventory_tags: + +Tags +---- + +Quoting `man iocage `_ + +.. code-block:: text + + PROPERTIES + ... + notes="any string" + Custom notes for miscellaneous tagging. + Default: none + Source: local + +We will use the format ``notes="tag1=value1 tag2=value2 ..."``. + +.. note:: + + The iocage tags have nothing to do with the :ref:`tags`. + +As root at the iocage host, set notes. For example, + +.. code-block:: console + + shell> iocage set notes="vmm=iocage_02 project=foo" srv_1 + notes: none -> vmm=iocage_02 project=foo + shell> iocage set notes="vmm=iocage_02 project=foo" srv_2 + notes: none -> vmm=iocage_02 project=foo + shell> iocage set notes="vmm=iocage_02 project=bar" srv_3 + notes: none -> vmm=iocage_02 project=bar + +Update the inventory configuration. Compose a dictionary *iocage_tags* and create groups. The option +:ansopt:`community.general.iocage#inventory:get_properties` must be enabled. +For example, ``hosts/02_iocage.yml`` could look like: + +.. 
code-block:: yaml + + plugin: community.general.iocage + host: 10.1.0.73 + user: admin + get_properties: true + hooks_results: + - /var/db/dhclient-hook.address.epair0b + compose: + ansible_host: (iocage_hooks.0 == '-') | ternary(iocage_ip4, iocage_hooks.0) + iocage_tags: dict(iocage_properties.notes | split | map('split', '=')) + keyed_groups: + - prefix: vmm + key: iocage_tags.vmm + - prefix: project + key: iocage_tags.project + +Display tags and groups. Create a playbook ``pb-test-groups.yml``: + +.. code-block:: yaml+jinja + + - hosts: all + remote_user: admin + + vars: + + ansible_python_interpreter: auto_silent + + tasks: + + - debug: + var: iocage_tags + + - debug: + msg: | + {% for group in groups %} + {{ group }}: {{ groups[group] }} + {% endfor %} + run_once: true + +Run the playbook: + +.. code-block:: console + + shell> ansible-playbook -i hosts/02_iocage.yml pb-test-groups.yml + + PLAY [all] ********************************************************************************************************** + + TASK [debug] ******************************************************************************************************** + ok: [srv_1] => + iocage_tags: + project: foo + vmm: iocage_02 + ok: [srv_2] => + iocage_tags: + project: foo + vmm: iocage_02 + ok: [srv_3] => + iocage_tags: + project: bar + vmm: iocage_02 + + TASK [debug] ******************************************************************************************************** + ok: [srv_1] => + msg: |- + all: ['srv_1', 'srv_2', 'srv_3'] + ungrouped: [] + vmm_iocage_02: ['srv_1', 'srv_2', 'srv_3'] + project_foo: ['srv_1', 'srv_2'] + project_bar: ['srv_3'] + + PLAY RECAP ********************************************************************************************************** + srv_1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + srv_2 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 + srv_3 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 
diff --git a/docs/docsite/rst/guide_modulehelper.rst b/docs/docsite/rst/guide_modulehelper.rst new file mode 100644 index 0000000000..711cdc7f99 --- /dev/null +++ b/docs/docsite/rst/guide_modulehelper.rst @@ -0,0 +1,559 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_modulehelper: + +Module Helper guide +=================== + + +Introduction +^^^^^^^^^^^^ + +Writing a module for Ansible is largely described in existing documentation. +However, a good part of that is boilerplate code that needs to be repeated every single time. +That is where ``ModuleHelper`` comes to assistance: a lot of that boilerplate code is done. + +.. _ansible_collections.community.general.docsite.guide_modulehelper.quickstart: + +Quickstart +"""""""""" + +See the `example from Ansible documentation `_ +written with ``ModuleHelper``. +But bear in mind that it does not showcase all of MH's features: + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper + + + class MyTest(ModuleHelper): + module = dict( + argument_spec=dict( + name=dict(type='str', required=True), + new=dict(type='bool', required=False, default=False), + ), + supports_check_mode=True, + ) + + def __run__(self): + self.vars.original_message = '' + self.vars.message = '' + if self.check_mode: + return + self.vars.original_message = self.vars.name + self.vars.message = 'goodbye' + self.changed = self.vars['new'] + if self.vars.name == "fail me": + self.do_raise("You requested this to fail") + + + def main(): + MyTest.execute() + + + if __name__ == '__main__': + main() + + +Module Helper +^^^^^^^^^^^^^ + +Introduction +"""""""""""" + +``ModuleHelper`` is a wrapper around the standard ``AnsibleModule``, providing extra features and conveniences. 
+The basic structure of a module using ``ModuleHelper`` is as shown in the
+:ref:`ansible_collections.community.general.docsite.guide_modulehelper.quickstart`
+section above, but there are more elements that will take part in it.
+
+.. code-block:: python
+
+    from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper
+
+
+    class MyTest(ModuleHelper):
+        # behavior for module parameters ONLY, see below for further information
+        output_params = ()
+        change_params = ()
+        diff_params = ()
+        facts_params = ()
+
+        facts_name = None  # used if generating facts, from parameters or otherwise
+
+        module = dict(
+            argument_spec=dict(...),
+            # ...
+        )
+
+After importing the ``ModuleHelper`` class, you need to declare your own class extending it.
+
+.. seealso::
+
+    There is a variation called ``StateModuleHelper``, which builds on top of the features provided by MH.
+    See :ref:`ansible_collections.community.general.docsite.guide_modulehelper.statemh` below for more details.
+
+The easiest way of specifying the module is to create the class variable ``module`` with a dictionary
+containing the exact arguments that would be passed as parameters to ``AnsibleModule``.
+If you prefer to create the ``AnsibleModule`` object yourself, just assign it to the ``module`` class variable.
+MH also accepts a parameter ``module`` in its constructor; if that parameter is used,
+then it will override the class variable. The parameter can either be ``dict`` or ``AnsibleModule`` as well.
+
+Beyond the definition of the module, there are other variables that can be used to control aspects
+of MH's behavior. These variables should be set at the very beginning of the class, and their semantics are
+explained through this document.
+
+The main logic of MH happens in the ``ModuleHelper.run()`` method, which looks like:
+
+.. 
code-block:: python + + @module_fails_on_exception + def run(self): + self.__init_module__() + self.__run__() + self.__quit_module__() + output = self.output + if 'failed' not in output: + output['failed'] = False + self.module.exit_json(changed=self.has_changed(), **output) + +The method ``ModuleHelper.__run__()`` must be implemented by the module and most +modules will be able to perform their actions implementing only that MH method. +However, in some cases, you might want to execute actions before or after the main tasks, in which cases +you should implement ``ModuleHelper.__init_module__()`` and ``ModuleHelper.__quit_module__()`` respectively. + +Note that the output comes from ``self.output``, which is a ``@property`` method. +By default, that property will collect all the variables that are marked for output and return them in a dictionary with their values. +Moreover, the default ``self.output`` will also handle Ansible ``facts`` and *diff mode*. +Also note the changed status comes from ``self.has_changed()``, which is usually calculated from variables that are marked +to track changes in their content. + +.. seealso:: + + More details in sections + :ref:`ansible_collections.community.general.docsite.guide_modulehelper.paramvaroutput` and + :ref:`ansible_collections.community.general.docsite.guide_modulehelper.changes` below. + +.. seealso:: + + See more about the decorator + :ref:`ansible_collections.community.general.docsite.guide_modulehelper.modulefailsdeco` below. + + +Another way to write the example from the +:ref:`ansible_collections.community.general.docsite.guide_modulehelper.quickstart` +would be: + +.. 
code-block:: python
+
+    def __init_module__(self):
+        self.vars.original_message = ''
+        self.vars.message = ''
+
+    def __run__(self):
+        if self.check_mode:
+            return
+        self.vars.original_message = self.vars.name
+        self.vars.message = 'goodbye'
+        self.changed = self.vars['new']
+
+    def __quit_module__(self):
+        if self.vars.name == "fail me":
+            self.do_raise("You requested this to fail")
+
+Notice that there are no calls to ``module.exit_json()`` nor ``module.fail_json()``: if the module fails, raise an exception.
+You can use the convenience method ``self.do_raise()`` or raise the exception as usual in Python to do that.
+If no exception is raised, then the module succeeds.
+
+.. seealso::
+
+    See more about exceptions in section
+    :ref:`ansible_collections.community.general.docsite.guide_modulehelper.exceptions` below.
+
+Ansible modules must have a ``main()`` function and the usual test for ``'__main__'``. When using MH that should look like:
+
+.. code-block:: python
+
+    def main():
+        MyTest.execute()
+
+
+    if __name__ == '__main__':
+        main()
+
+The class method ``execute()`` is nothing more than a convenience shortcut for:
+
+.. code-block:: python
+
+    m = MyTest()
+    m.run()
+
+Optionally, an ``AnsibleModule`` may be passed as parameter to ``execute()``.
+
+.. _ansible_collections.community.general.docsite.guide_modulehelper.paramvaroutput:
+
+Parameters, variables, and output
+"""""""""""""""""""""""""""""""""
+
+All the parameters automatically become variables in the ``self.vars`` attribute, which is of the ``VarDict`` type.
+By using ``self.vars``, you get a central mechanism to access the parameters but also to expose variables as return values of the module.
+As described in :ref:`ansible_collections.community.general.docsite.guide_vardict`, variables in ``VarDict`` have metadata associated to them.
+One of the attributes in that metadata marks the variable for output, and MH makes use of that to generate the module's return values.
+
+.. 
note::
+
+   The ``VarDict`` class was introduced in community.general 7.1.0, as part of ``ModuleHelper`` itself.
+   However, it has been factored out to become a utility on its own, described in :ref:`ansible_collections.community.general.docsite.guide_vardict`,
+   and the older implementation was removed in community.general 11.0.0.
+
+   Some code might still refer to the class variables ``use_old_vardict`` and ``mute_vardict_deprecation``, used for the transition to the new
+   implementation but from community.general 11.0.0 onwards they are no longer used and can be safely removed from the code.
+
+Contrary to new variables created in ``VarDict``, module parameters are not set for output by default.
+If you want to include some module parameters in the output, list them in the ``output_params`` class variable.
+
+.. code-block:: python
+
+    class MyTest(ModuleHelper):
+        output_params = ('state', 'name')
+        ...
+
+.. important::
+
+   The variable names listed in ``output_params`` **must be module parameters**, as in parameters listed in the module's ``argument_spec``.
+   Names not found in ``argument_spec`` are silently ignored.
+
+Another neat feature provided by MH by using ``VarDict`` is the automatic tracking of changes when setting the metadata ``change=True``.
+Again, to enable this feature for module parameters, you must list them in the ``change_params`` class variable.
+
+.. code-block:: python
+
+    class MyTest(ModuleHelper):
+        # example from community.general.xfconf
+        change_params = ('value', )
+        ...
+
+.. important::
+
+   The variable names listed in ``change_params`` **must be module parameters**, as in parameters listed in the module's ``argument_spec``.
+   Names not found in ``argument_spec`` are silently ignored.
+
+.. seealso::
+
+   See more about this in
+   :ref:`ansible_collections.community.general.docsite.guide_modulehelper.changes` below. 
+ +Similarly, if you want to use Ansible's diff mode, you can set the metadata ``diff=True`` and ``diff_params`` for module parameters. +With that, MH will automatically generate the diff output for variables that have changed. + +.. code-block:: python + + class MyTest(ModuleHelper): + diff_params = ('value', ) + + def __run__(self): + # example from community.general.gio_mime + self.vars.set_meta("handler", initial_value=gio_mime_get(self.runner, self.vars.mime_type), diff=True, change=True) + +.. important:: + + The variable names listed in ``diff_params`` **must be module parameters**, as in parameters listed in the module's ``argument_spec``. + Names not found in ``argument_spec`` are silently ignored. + +Moreover, if a module is set to return *facts* instead of return values, then again use the metadata ``fact=True`` and ``fact_params`` for module parameters. +Additionally, you must specify ``facts_name``, as in: + +.. code-block:: python + + class VolumeFacts(ModuleHelper): + facts_name = 'volume_facts' + + def __init_module__(self): + self.vars.set("volume", 123, fact=True) + +That generates an Ansible fact like: + +.. code-block:: yaml+jinja + + - name: Obtain volume facts + some.collection.volume_facts: + # parameters + + - name: Print volume facts + debug: + msg: Volume fact is {{ ansible_facts.volume_facts.volume }} + +.. important:: + + The variable names listed in ``fact_params`` **must be module parameters**, as in parameters listed in the module's ``argument_spec``. + Names not found in ``argument_spec`` are silently ignored. + +.. important:: + + If ``facts_name`` is not set, the module does not generate any facts. + + +.. _ansible_collections.community.general.docsite.guide_modulehelper.changes: + +Handling changes +"""""""""""""""" + +In MH there are many ways to indicate change in the module execution. 
Here they are:
+
+Tracking changes in variables
+-----------------------------
+
+As explained above, you can enable change tracking in any number of variables in ``self.vars``.
+By the end of the module execution, if any of those variables has a value different from the first value assigned to them,
+then that will be picked up by MH and signalled as changed at the module output.
+See the example below to learn how you can enable change tracking in variables:
+
+.. code-block:: python
+
+    # using __init_module__() as example, it works the same in __run__() and __quit_module__()
+    def __init_module__(self):
+        # example from community.general.ansible_galaxy_install
+        self.vars.set("new_roles", {}, change=True)
+
+        # example of "hidden" variable used only to track change in a value from community.general.gconftool2
+        self.vars.set('_value', self.vars.previous_value, output=False, change=True)
+
+        # enable change-tracking without assigning value
+        self.vars.set_meta("new_roles", change=True)
+
+        # if you must forcibly set an initial value to the variable
+        self.vars.set_meta("new_roles", initial_value=[])
+        ...
+
+If the end value of any variable marked ``change`` is different from its initial value, then MH will return ``changed=True``.
+
+Indicating changes with ``changed``
+-----------------------------------
+
+If you want to indicate change directly in the code, then use the ``self.changed`` property in MH.
+Beware that this is a ``@property`` method in MH, with both a *getter* and a *setter*.
+By default, that hidden field is set to ``False``.
+
+Effective change
+----------------
+
+The effective outcome for the module is determined in the ``self.has_changed()`` method, and it consists of the logical *OR* operation
+between ``self.changed`` and the change calculated from ``self.vars``.
+
+.. 
_ansible_collections.community.general.docsite.guide_modulehelper.exceptions:
+
+Exceptions
+""""""""""
+
+In MH, instead of calling ``module.fail_json()`` you can just raise an exception.
+The output variables are collected the same way they would be for a successful execution.
+However, you can set output variables specifically for that exception, if you so choose.
+
+.. code-block:: python
+
+    from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelperException
+
+    def __init_module__(self):
+        if not complex_validation():
+            self.do_raise("Validation failed!")
+
+        # Or passing output variables
+        awesomeness = calculate_awesomeness()
+        if awesomeness > 1000:
+            self.do_raise("Over awesome, I cannot handle it!", update_output={"awesomeness": awesomeness})
+            # which is just a convenience shortcut for
+            raise ModuleHelperException("...", update_output={...})
+
+All exceptions derived from ``Exception`` are captured and translated into a ``fail_json()`` call.
+However, if you do want to call ``self.module.fail_json()`` yourself it will work,
+just keep in mind that there will be no automatic handling of output variables in that case.
+
+Behind the curtains, all ``do_raise()`` does is to raise a ``ModuleHelperException``.
+If you want to create specialized error handling for your code, the best way is to extend that class and raise it when needed.
+
+.. _ansible_collections.community.general.docsite.guide_modulehelper.statemh:
+
+StateModuleHelper
+^^^^^^^^^^^^^^^^^
+
+Many modules use a parameter ``state`` that effectively controls the exact action performed by the module, such as
+``state=present`` or ``state=absent`` for installing or removing packages.
+By using ``StateModuleHelper`` you can make your code like the excerpt from the ``gconftool2`` below:
+
+.. code-block:: python
+
+    from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper
+
+    class GConftool(StateModuleHelper):
+        ... 
+ module = dict( + ... + ) + + def __init_module__(self): + self.runner = gconftool2_runner(self.module, check_rc=True) + ... + + self.vars.set('previous_value', self._get(), fact=True) + self.vars.set('value_type', self.vars.value_type) + self.vars.set('_value', self.vars.previous_value, output=False, change=True) + self.vars.set_meta('value', initial_value=self.vars.previous_value) + self.vars.set('playbook_value', self.vars.value, fact=True) + + ... + + def state_absent(self): + with self.runner("state key", output_process=self._make_process(False)) as ctx: + ctx.run() + self.vars.set('run_info', ctx.run_info, verbosity=4) + self.vars.set('new_value', None, fact=True) + self.vars._value = None + + def state_present(self): + with self.runner("direct config_source value_type state key value", output_process=self._make_process(True)) as ctx: + ctx.run() + self.vars.set('run_info', ctx.run_info, verbosity=4) + self.vars.set('new_value', self._get(), fact=True) + self.vars._value = self.vars.new_value + +Note that the method ``__run__()`` is implemented in ``StateModuleHelper``, all you need to implement are the methods ``state_``. +In the example above, :ansplugin:`community.general.gconftool2#module` only has two states, ``present`` and ``absent``, thus, ``state_present()`` and ``state_absent()``. + +If the controlling parameter is not called ``state``, like in :ansplugin:`community.general.jira#module` module, just let SMH know about it: + +.. code-block:: python + + class JIRA(StateModuleHelper): + state_param = 'operation' + + def operation_create(self): + ... + + def operation_search(self): + ... + +Lastly, if the module is called with ``state=somevalue`` and the method ``state_somevalue`` +is not implemented, SMH will resort to call a method called ``__state_fallback__()``. +By default, this method will raise a ``ValueError`` indicating the method was not found. 
+Naturally, you can override that method to write a default implementation, as in :ansplugin:`community.general.locale_gen#module`: + +.. code-block:: python + + def __state_fallback__(self): + if self.vars.state_tracking == self.vars.state: + return + if self.vars.ubuntu_mode: + self.apply_change_ubuntu(self.vars.state, self.vars.name) + else: + self.apply_change(self.vars.state, self.vars.name) + +That module has only the states ``present`` and ``absent`` and the code for both is the one in the fallback method. + +.. note:: + + The name of the fallback method **does not change** if you set a different value of ``state_param``. + + +Other Conveniences +^^^^^^^^^^^^^^^^^^ + +Delegations to AnsibleModule +"""""""""""""""""""""""""""" + +The MH properties and methods below are delegated as-is to the underlying ``AnsibleModule`` instance in ``self.module``: + +- ``check_mode`` +- ``get_bin_path()`` +- ``warn()`` +- ``deprecate()`` + +Additionally, MH will also delegate: + +- ``diff_mode`` to ``self.module._diff`` +- ``verbosity`` to ``self.module._verbosity`` + +Starting in community.general 10.3.0, MH will also delegate the method ``debug`` to ``self.module``. +If any existing module already has a ``debug`` attribute defined, a warning message will be generated, +requesting it to be renamed. Upon the release of community.general 12.0.0, the delegation will be +preemptive and will override any existing method or property in the subclasses. + +Decorators +"""""""""" + +The following decorators should only be used within ``ModuleHelper`` class. + +@cause_changes +-------------- + +This decorator will control whether the outcome of the method will cause the module to signal change in its output. +If the method completes without raising an exception it is considered to have succeeded, otherwise, it will have failed. + +The decorator has a parameter ``when`` that accepts three different values: ``success``, ``failure``, and ``always``. 
+There are also two legacy parameters, ``on_success`` and ``on_failure``, that will be deprecated, so do not use them.
+The value of ``changed`` in the module output will be set to ``True``:
+
+- ``when="success"`` and the method completes without raising an exception.
+- ``when="failure"`` and the method raises an exception.
+- ``when="always"``, regardless of the method raising an exception or not.
+
+.. code-block:: python
+
+    from ansible_collections.community.general.plugins.module_utils.module_helper import cause_changes
+
+    # adapted excerpt from the community.general.jira module
+    class JIRA(StateModuleHelper):
+        @cause_changes(when="success")
+        def operation_create(self):
+            ...
+
+If ``when`` has a different value or no parameters are specified, the decorator will have no effect whatsoever.
+
+.. _ansible_collections.community.general.docsite.guide_modulehelper.modulefailsdeco:
+
+@module_fails_on_exception
+--------------------------
+
+In a method using this decorator, if an exception is raised, the text message of that exception will be captured
+by the decorator and used to call ``self.module.fail_json()``.
+In most of the cases there will be no need to use this decorator, because ``ModuleHelper.run()`` already uses it.
+
+@check_mode_skip
+----------------
+
+If the module is running in check mode, this decorator will prevent the method from executing.
+The return value in that case is ``None``.
+
+.. code-block:: python
+
+    from ansible_collections.community.general.plugins.module_utils.module_helper import check_mode_skip
+
+    # adapted excerpt from the community.general.locale_gen module
+    class LocaleGen(StateModuleHelper):
+        @check_mode_skip
+        def __state_fallback__(self):
+            ...
+
+
+@check_mode_skip_returns
+------------------------
+
+This decorator is similar to the previous one, but the developer can control the return value for the method when running in check mode.
+It is used with one of two parameters. 
One is ``callable`` and the return value in check mode will be ``callable(self, *args, **kwargs)``, +where ``self`` is the ``ModuleHelper`` instance and the union of ``args`` and ``kwargs`` will contain all the parameters passed to the method. + +The other option is to use the parameter ``value``, in which case the method will return ``value`` when in check mode. + + +References +^^^^^^^^^^ + +- `Ansible Developer Guide `_ +- `Creating a module `_ +- `Returning ansible facts `_ +- :ref:`ansible_collections.community.general.docsite.guide_vardict` + + +.. versionadded:: 3.1.0 diff --git a/docs/docsite/rst/guide_online.rst b/docs/docsite/rst/guide_online.rst new file mode 100644 index 0000000000..c233b403e8 --- /dev/null +++ b/docs/docsite/rst/guide_online.rst @@ -0,0 +1,49 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_online: + +**************** +Online.net Guide +**************** + +Introduction +============ + +Online is a French hosting company mainly known for providing bare-metal servers named Dedibox. +Check it out: `https://www.online.net/en `_ + +Dynamic inventory for Online resources +-------------------------------------- + +Ansible has a dynamic inventory plugin that can list your resources. + +1. Create a YAML configuration such as ``online_inventory.yml`` with this content: + + .. code-block:: yaml + + plugin: community.general.online + +2. Set your ``ONLINE_TOKEN`` environment variable with your token. + + You need to open an account and log into it before you can get a token. + You can find your token at the following page: `https://console.online.net/en/api/access `_ + +3. You can test that your inventory is working by running: + + .. code-block:: console + + $ ansible-inventory -v -i online_inventory.yml --list + + +4. 
Now you can run your playbook or any other module with this inventory: + + .. code-block:: ansible-output + + $ ansible all -i online_inventory.yml -m ping + sd-96735 | SUCCESS => { + "changed": false, + "ping": "pong" + } diff --git a/docs/docsite/rst/guide_packet.rst b/docs/docsite/rst/guide_packet.rst new file mode 100644 index 0000000000..95b38dddd0 --- /dev/null +++ b/docs/docsite/rst/guide_packet.rst @@ -0,0 +1,214 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_packet: + +********************************** +Packet.net Guide +********************************** + +Introduction +============ + +`Packet.net `_ is a bare metal infrastructure host that is supported by the community.general collection through six cloud modules. The six modules are: + +- :ansplugin:`community.general.packet_device#module`: manages servers on Packet. You can use this module to create, restart and delete devices. +- :ansplugin:`community.general.packet_ip_subnet#module`: assign IP subnet to a bare metal server +- :ansplugin:`community.general.packet_project#module`: create/delete a project in Packet host +- :ansplugin:`community.general.packet_sshkey#module`: adds a public SSH key from file or value to the Packet infrastructure. Every subsequently-created device will have this public key installed in .ssh/authorized_keys. +- :ansplugin:`community.general.packet_volume#module`: create/delete a volume in Packet host +- :ansplugin:`community.general.packet_volume_attachment#module`: attach/detach a volume to a device in the Packet host + +Note, this guide assumes you are familiar with Ansible and how it works. If you are not, have a look at their :ref:`docs ` before getting started. 
+ +Requirements +============ + +The Packet modules connect to the Packet API using the `packet-python package `_. You can install it with pip: + +.. code-block:: console + + $ pip install packet-python + +In order to check the state of devices created by Ansible on Packet, it is a good idea to install one of the `Packet CLI clients `_. Otherwise you can check them through the `Packet portal `_. + +To use the modules you will need a Packet API token. You can generate an API token through the Packet portal `here `__. The simplest way to authenticate yourself is to set the Packet API token in an environment variable: + +.. code-block:: console + + $ export PACKET_API_TOKEN=Bfse9F24SFtfs423Gsd3ifGsd43sSdfs + +If you are not comfortable exporting your API token, you can pass it as a parameter to the modules. + +On Packet, devices and reserved IP addresses belong to `projects `_. In order to use the packet_device module, you need to specify the UUID of the project in which you want to create or manage devices. You can find a project's UUID in the Packet portal `here `_ (it is just under the project table) or through one of the available `CLIs `_. + + +If you want to use a new SSH key pair in this tutorial, you can generate it to ``./id_rsa`` and ``./id_rsa.pub`` as: + +.. code-block:: console + + $ ssh-keygen -t rsa -f ./id_rsa + +If you want to use an existing key pair, just copy the private and public key over to the playbook directory. + + +Device Creation +=============== + +The following code block is a simple playbook that creates one `Type 0 `_ server (the ``plan`` parameter). You have to supply ``plan`` and ``operating_system``. ``location`` defaults to ``ewr1`` (Parsippany, NJ). You can find all the possible values for the parameters through a `CLI client `_. + +.. 
code-block:: yaml+jinja + + # playbook_create.yml + + - name: Create Ubuntu device + hosts: localhost + tasks: + + - community.general.packet_sshkey: + key_file: ./id_rsa.pub + label: tutorial key + + - community.general.packet_device: + project_id: + hostnames: myserver + operating_system: ubuntu_16_04 + plan: baremetal_0 + facility: sjc1 + +After running ``ansible-playbook playbook_create.yml``, you should have a server provisioned on Packet. You can verify through a CLI or in the `Packet portal `__. + +If you get an error with the message "failed to set machine state present, error: Error 404: Not Found", please verify your project UUID. + + +Updating Devices +================ + +The two parameters used to uniquely identify Packet devices are: "device_ids" and "hostnames". Both parameters accept either a single string (later converted to a one-element list), or a list of strings. + +The ``device_ids`` and ``hostnames`` parameters are mutually exclusive. The following values are all acceptable: + +- device_ids: ``a27b7a83-fc93-435b-a128-47a5b04f2dcf`` + +- hostnames: ``mydev1`` + +- device_ids: ``[a27b7a83-fc93-435b-a128-47a5b04f2dcf, 4887130f-0ccd-49a0-99b0-323c1ceb527b]`` + +- hostnames: ``[mydev1, mydev2]`` + +In addition, hostnames can contain a special ``%d`` formatter along with a ``count`` parameter that lets you easily expand hostnames that follow a simple name and number pattern; in other words, ``hostnames: "mydev%d", count: 2`` will expand to [mydev1, mydev2]. + +If your playbook acts on existing Packet devices, you can only pass the ``hostname`` and ``device_ids`` parameters. The following playbook shows how you can reboot a specific Packet device by setting the ``hostname`` parameter: + +.. 
code-block:: yaml+jinja + + # playbook_reboot.yml + + - name: reboot myserver + hosts: localhost + tasks: + + - community.general.packet_device: + project_id: + hostnames: myserver + state: rebooted + +You can also identify specific Packet devices with the ``device_ids`` parameter. The device's UUID can be found in the `Packet Portal `_ or by using a `CLI `_. The following playbook removes a Packet device using the ``device_ids`` field: + +.. code-block:: yaml+jinja + + # playbook_remove.yml + + - name: remove a device + hosts: localhost + tasks: + + - community.general.packet_device: + project_id: + device_ids: + state: absent + + +More Complex Playbooks +====================== + +In this example, we will create a CoreOS cluster with `user data `_. + + +The CoreOS cluster will use `etcd `_ for discovery of other servers in the cluster. Before provisioning your servers, you will need to generate a discovery token for your cluster: + +.. code-block:: console + + $ curl -w "\n" 'https://discovery.etcd.io/new?size=3' + +The following playbook will create an SSH key, 3 Packet servers, and then wait until SSH is ready (or until 5 minutes passed). Make sure to substitute the discovery token URL in ``user_data``, and the ``project_id`` before running ``ansible-playbook``. Also, feel free to change ``plan`` and ``facility``. + +.. 
code-block:: yaml+jinja + + # playbook_coreos.yml + + - name: Start 3 CoreOS nodes in Packet and wait until SSH is ready + hosts: localhost + tasks: + + - community.general.packet_sshkey: + key_file: ./id_rsa.pub + label: new + + - community.general.packet_device: + hostnames: [coreos-one, coreos-two, coreos-three] + operating_system: coreos_beta + plan: baremetal_0 + facility: ewr1 + project_id: + wait_for_public_IPv: 4 + user_data: | + # cloud-config + coreos: + etcd2: + discovery: https://discovery.etcd.io/ + advertise-client-urls: http://$private_ipv4:2379,http://$private_ipv4:4001 + initial-advertise-peer-urls: http://$private_ipv4:2380 + listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 + listen-peer-urls: http://$private_ipv4:2380 + fleet: + public-ip: $private_ipv4 + units: + - name: etcd2.service + command: start + - name: fleet.service + command: start + register: newhosts + + - name: wait for ssh + ansible.builtin.wait_for: + delay: 1 + host: "{{ item.public_ipv4 }}" + port: 22 + state: started + timeout: 500 + loop: "{{ newhosts.results[0].devices }}" + + +As with most Ansible modules, the default states of the Packet modules are idempotent, meaning the resources in your project will remain the same after re-runs of a playbook. Thus, we can keep the ``packet_sshkey`` module call in our playbook. If the public key is already in your Packet account, the call will have no effect. + +The second module call provisions 3 Packet Type 0 (specified using the ``plan`` parameter) servers in the project identified by the ``project_id`` parameter. The servers are all provisioned with CoreOS beta (the ``operating_system`` parameter) and are customized with cloud-config user data passed to the ``user_data`` parameter. + +The ``packet_device`` module has a ``wait_for_public_IPv`` that is used to specify the version of the IP address to wait for (valid values are ``4`` or ``6`` for IPv4 or IPv6). 
If specified, Ansible will wait until the GET API call for a device contains an Internet-routeable IP address of the specified version. When referring to an IP address of a created device in subsequent module calls, it is wise to use the ``wait_for_public_IPv`` parameter, or ``state: active`` in the packet_device module call. + +Run the playbook: + +.. code-block:: console + + $ ansible-playbook playbook_coreos.yml + +Once the playbook quits, your new devices should be reachable through SSH. Try to connect to one and check if etcd has started properly: + +.. code-block:: console + + tomk@work $ ssh -i id_rsa core@$one_of_the_servers_ip + core@coreos-one ~ $ etcdctl cluster-health + +If you have any questions or comments let us know! help@packet.net diff --git a/docs/docsite/rst/guide_scaleway.rst b/docs/docsite/rst/guide_scaleway.rst new file mode 100644 index 0000000000..f3b7b24e0e --- /dev/null +++ b/docs/docsite/rst/guide_scaleway.rst @@ -0,0 +1,320 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_scaleway: + +************** +Scaleway Guide +************** + +Introduction +============ + +`Scaleway `_ is a cloud provider supported by the community.general collection through a set of plugins and modules. +Those modules are: + +- :ansplugin:`community.general.scaleway_compute#module`: manages servers on Scaleway. You can use this module to create, restart and delete servers. 
+
+- :ansplugin:`community.general.scaleway_compute_private_network#module`
+- :ansplugin:`community.general.scaleway_container#module`
+- :ansplugin:`community.general.scaleway_container_info#module`
+- :ansplugin:`community.general.scaleway_container_namespace_info#module`
+- :ansplugin:`community.general.scaleway_container_namespace#module`
+- :ansplugin:`community.general.scaleway_container_registry_info#module`
+- :ansplugin:`community.general.scaleway_container_registry#module`
+- :ansplugin:`community.general.scaleway_database_backup#module`
+- :ansplugin:`community.general.scaleway_function#module`
+- :ansplugin:`community.general.scaleway_function_info#module`
+- :ansplugin:`community.general.scaleway_function_namespace_info#module`
+- :ansplugin:`community.general.scaleway_function_namespace#module`
+- :ansplugin:`community.general.scaleway_image_info#module`
+- :ansplugin:`community.general.scaleway_ip#module`
+- :ansplugin:`community.general.scaleway_ip_info#module`
+- :ansplugin:`community.general.scaleway_lb#module`
+- :ansplugin:`community.general.scaleway_organization_info#module`
+- :ansplugin:`community.general.scaleway_private_network#module`
+- :ansplugin:`community.general.scaleway_security_group#module`
+- :ansplugin:`community.general.scaleway_security_group_info#module`
+- :ansplugin:`community.general.scaleway_security_group_rule#module`
+- :ansplugin:`community.general.scaleway_server_info#module`
+- :ansplugin:`community.general.scaleway_snapshot_info#module`
+- :ansplugin:`community.general.scaleway_sshkey#module`: adds a public SSH key from a file or value to the Scaleway infrastructure. Every subsequently-created device will have this public key installed in .ssh/authorized_keys.
+- :ansplugin:`community.general.scaleway_user_data#module`
+- :ansplugin:`community.general.scaleway_volume#module`: manages volumes on Scaleway. 
+- :ansplugin:`community.general.scaleway_volume_info#module` + +The plugins are: + +- :ansplugin:`community.general.scaleway#inventory`: inventory plugin + + +.. note:: + This guide assumes you are familiar with Ansible and how it works. + If you are not, have a look at :ref:`ansible_documentation` before getting started. + +Requirements +============ + +The Scaleway modules and inventory script connect to the Scaleway API using `Scaleway REST API `_. +To use the modules and inventory script you will need a Scaleway API token. +You can generate an API token through the `Scaleway console's credential page `__. +The simplest way to authenticate yourself is to set the Scaleway API token in an environment variable: + +.. code-block:: console + + $ export SCW_TOKEN=00000000-1111-2222-3333-444444444444 + +If you are not comfortable exporting your API token, you can pass it as a parameter to the modules using the ``api_token`` argument. + +If you want to use a new SSH key pair in this tutorial, you can generate it to ``./id_rsa`` and ``./id_rsa.pub`` as: + +.. code-block:: console + + $ ssh-keygen -t rsa -f ./id_rsa + +If you want to use an existing key pair, just copy the private and public key over to the playbook directory. + +How to add an SSH key? +====================== + +Connection to Scaleway Compute nodes use Secure Shell. +SSH keys are stored at the account level, which means that you can reuse the same SSH key in multiple nodes. +The first step to configure Scaleway compute resources is to have at least one SSH key configured. + +:ansplugin:`community.general.scaleway_sshkey#module` is a module that manages SSH keys on your Scaleway account. +You can add an SSH key to your account by including the following task in a playbook: + +.. code-block:: yaml+jinja + + - name: "Add SSH key" + community.general.scaleway_sshkey: + ssh_pub_key: "ssh-rsa AAAA..." + state: "present" + +The ``ssh_pub_key`` parameter contains your ssh public key as a string. 
Here is an example inside a playbook: + + +.. code-block:: yaml+jinja + + - name: Test SSH key lifecycle on a Scaleway account + hosts: localhost + gather_facts: false + environment: + SCW_API_KEY: "" + + tasks: + + - community.general.scaleway_sshkey: + ssh_pub_key: "ssh-rsa AAAAB...424242 developer@example.com" + state: present + register: result + + - ansible.builtin.assert: + that: + - result is success and result is changed + +How to create a compute instance? +================================= + +Now that we have an SSH key configured, the next step is to spin up a server! +:ansplugin:`community.general.scaleway_compute#module` is a module that can create, update and delete Scaleway compute instances: + +.. code-block:: yaml+jinja + + - name: Create a server + community.general.scaleway_compute: + name: foobar + state: present + image: 00000000-1111-2222-3333-444444444444 + organization: 00000000-1111-2222-3333-444444444444 + region: ams1 + commercial_type: START1-S + +Here are the parameter details for the example shown above: + +- ``name`` is the name of the instance (the one that will show up in your web console). +- ``image`` is the UUID of the system image you would like to use. + A list of all images is available for each availability zone. +- ``organization`` represents the organization that your account is attached to. +- ``region`` represents the Availability Zone which your instance is in (for this example, ``par1`` and ``ams1``). +- ``commercial_type`` represents the name of the commercial offers. + You can check out the Scaleway pricing page to find which instance is right for you. + +Take a look at this short playbook to see a working example using ``scaleway_compute``: + +.. 
code-block:: yaml+jinja + + - name: Test compute instance lifecycle on a Scaleway account + hosts: localhost + gather_facts: false + environment: + SCW_API_KEY: "" + + tasks: + + - name: Create a server + register: server_creation_task + community.general.scaleway_compute: + name: foobar + state: present + image: 00000000-1111-2222-3333-444444444444 + organization: 00000000-1111-2222-3333-444444444444 + region: ams1 + commercial_type: START1-S + wait: true + + - ansible.builtin.debug: + var: server_creation_task + + - ansible.builtin.assert: + that: + - server_creation_task is success + - server_creation_task is changed + + - name: Run it + community.general.scaleway_compute: + name: foobar + state: running + image: 00000000-1111-2222-3333-444444444444 + organization: 00000000-1111-2222-3333-444444444444 + region: ams1 + commercial_type: START1-S + wait: true + tags: + - web_server + register: server_run_task + + - ansible.builtin.debug: + var: server_run_task + + - ansible.builtin.assert: + that: + - server_run_task is success + - server_run_task is changed + +Dynamic Inventory Plugin +======================== + +Ansible ships with :ansplugin:`community.general.scaleway#inventory`. +You can now get a complete inventory of your Scaleway resources through this plugin and filter it on +different parameters (``regions`` and ``tags`` are currently supported). + +Let us create an example! +Suppose that we want to get all hosts that got the tag web_server. +Create a file named ``scaleway_inventory.yml`` with the following content: + +.. code-block:: yaml+jinja + + plugin: community.general.scaleway + regions: + - ams1 + - par1 + tags: + - web_server + +This inventory means that we want all hosts that got the tag ``web_server`` on the zones ``ams1`` and ``par1``. +Once you have configured this file, you can get the information using the following command: + +.. code-block:: console + + $ ansible-inventory --list -i scaleway_inventory.yml + +The output will be: + +.. 
code-block:: json + + { + "_meta": { + "hostvars": { + "dd8e3ae9-0c7c-459e-bc7b-aba8bfa1bb8d": { + "ansible_verbosity": 6, + "arch": "x86_64", + "commercial_type": "START1-S", + "hostname": "foobar", + "ipv4": "192.0.2.1", + "organization": "00000000-1111-2222-3333-444444444444", + "state": "running", + "tags": [ + "web_server" + ] + } + } + }, + "all": { + "children": [ + "ams1", + "par1", + "ungrouped", + "web_server" + ] + }, + "ams1": {}, + "par1": { + "hosts": [ + "dd8e3ae9-0c7c-459e-bc7b-aba8bfa1bb8d" + ] + }, + "ungrouped": {}, + "web_server": { + "hosts": [ + "dd8e3ae9-0c7c-459e-bc7b-aba8bfa1bb8d" + ] + } + } + +As you can see, we get different groups of hosts. +``par1`` and ``ams1`` are groups based on location. +``web_server`` is a group based on a tag. + +In case a filter parameter is not defined, the plugin supposes all values possible are wanted. +This means that for each tag that exists on your Scaleway compute nodes, a group based on each tag will be created. + +Scaleway S3 object storage +========================== + +`Object Storage `_ allows you to store any kind of objects (documents, images, videos, and so on). +As the Scaleway API is S3 compatible, Ansible supports it natively through the amazon.aws modules: :ansplugin:`amazon.aws.s3_bucket#module`, :ansplugin:`amazon.aws.s3_object#module`. + +You can find many examples in the `scaleway_s3 integration tests `_. + +.. 
code-block:: yaml+jinja + + - hosts: myserver + vars: + scaleway_region: nl-ams + s3_url: https://s3.nl-ams.scw.cloud + environment: + # AWS_ACCESS_KEY matches your scaleway organization id available at https://cloud.scaleway.com/#/account + AWS_ACCESS_KEY: 00000000-1111-2222-3333-444444444444 + # AWS_SECRET_KEY matches a secret token that you can retrieve at https://cloud.scaleway.com/#/credentials + AWS_SECRET_KEY: aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee + module_defaults: + group/amazon.aws.aws: + s3_url: '{{ s3_url }}' + region: '{{ scaleway_region }}' + tasks: + # use a fact instead of a variable, otherwise template is evaluate each time variable is used + - ansible.builtin.set_fact: + bucket_name: "{{ 99999999 | random | to_uuid }}" + + # "requester_pays:" is mandatory because Scaleway does not implement related API + # another way is to use amazon.aws.s3_object and "mode: create" ! + - amazon.aws.s3_bucket: + name: '{{ bucket_name }}' + requester_pays: + + - name: Another way to create the bucket + amazon.aws.s3_object: + bucket: '{{ bucket_name }}' + mode: create + encrypt: false + register: bucket_creation_check + + - name: add something in the bucket + amazon.aws.s3_object: + mode: put + bucket: '{{ bucket_name }}' + src: /tmp/test.txt # needs to be created before + object: test.txt + encrypt: false # server side encryption must be disabled diff --git a/docs/docsite/rst/guide_uthelper.rst b/docs/docsite/rst/guide_uthelper.rst new file mode 100644 index 0000000000..c4a4110d70 --- /dev/null +++ b/docs/docsite/rst/guide_uthelper.rst @@ -0,0 +1,394 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_uthelper: + +UTHelper Guide +============== + +Introduction +^^^^^^^^^^^^ + +``UTHelper`` was written to reduce the boilerplate code used in unit tests for modules. 
+
+It was originally written to handle tests of modules that run external commands using ``AnsibleModule.run_command()``.
+At the time of writing (Feb 2025) that remains the only type of tests you can use
+``UTHelper`` for, but it aims to provide support for other types of interactions.
+
+Until now, there have been many different ways to implement unit tests that validate a module based on the execution of external commands. See some examples:
+
+* `test_apk.py `_ - A very simple one
+* `test_bootc_manage.py `_ -
+  This one has more test cases, but do notice how the code is repeated amongst them.
+* `test_modprobe.py `_ -
+  This one has 15 tests in it, but to achieve that it declares 8 classes repeating quite a lot of code.
+
+As you can notice, there is no consistency in the way these tests are executed -
+they all do the same thing eventually, but each one is written in a very distinct way.
+
+``UTHelper`` aims to:
+
+* provide a consistent idiom to define unit tests
+* reduce the code to a bare minimum, and
+* define tests as data instead
+* allow the test case definitions to be expressed not only as a Python data structure but also as YAML content
+
+Quickstart
+""""""""""
+
+To use UTHelper, your test module will need only a bare minimum of code:
+
+.. code-block:: python
+
+    # tests/unit/plugin/modules/test_ansible_module.py
+    from ansible_collections.community.general.plugins.modules import ansible_module
+    from .uthelper import UTHelper, RunCommandMock
+
+
+    UTHelper.from_module(ansible_module, __name__, mocks=[RunCommandMock])
+
+Then, in the test specification file, you have:
+
+.. 
code-block:: yaml
+
+    # tests/unit/plugin/modules/test_ansible_module.yaml
+    test_cases:
+      - id: test_ansible_module
+        flags:
+          diff: true
+        input:
+          state: present
+          name: Roger the Shrubber
+        output:
+          shrubbery:
+            looks: nice
+            price: not too expensive
+          changed: true
+          diff:
+            before:
+              shrubbery: null
+            after:
+              shrubbery:
+                looks: nice
+                price: not too expensive
+        mocks:
+          run_command:
+            - command: [/testbin/shrubber, --version]
+              rc: 0
+              out: "2.80.0\n"
+              err: ''
+            - command: [/testbin/shrubber, --make-shrubbery]
+              rc: 0
+              out: 'Shrubbery created'
+              err: ''
+
+.. note::
+
+   If you prefer to pick a different YAML file for the test cases, or if you prefer to define them in plain Python,
+   you can use the convenience methods ``UTHelper.from_file()`` and ``UTHelper.from_spec()``, respectively.
+   See more details below.
+
+
+Using ``UTHelper``
+^^^^^^^^^^^^^^^^^^
+
+Test Module
+"""""""""""
+
+``UTHelper`` is **strictly for unit tests**. To use it, you import the ``.uthelper.UTHelper`` class.
+As mentioned in different parts of this guide, there are three different mechanisms to load the test cases.
+
+.. seealso::
+
+   See the UTHelper class reference below for API details on the three different mechanisms.
+
+
+The easiest and most recommended way of using ``UTHelper`` is literally the example shown.
+See a real world example at
+`test_gconftool2.py `_.
+
+The ``from_module()`` method will pick the filename of the test module up (in the example above, ``tests/unit/plugins/modules/test_gconftool2.py``)
+and it will search for ``tests/unit/plugins/modules/test_gconftool2.yaml`` (or ``.yml`` if that is not found).
+In that file it will expect to find the test specification expressed in YAML format, conforming to the structure described below.
+
+If you prefer to read the test specification from a different file path, use ``from_file()`` passing the file handle for the YAML file. 
+
+And, if for any reason you prefer or need to pass the data structure rather than dealing with YAML files, use the ``from_spec()`` method.
+A real world example for that can be found at
+`test_snap.py `_.
+
+
+Test Specification
+""""""""""""""""""
+
+The structure of the test specification data is described below.
+
+Top level
+---------
+
+At the top level there are two accepted keys:
+
+- ``anchors: dict``
+  Optional. Placeholder for you to define YAML anchors that can be repeated in the test cases.
+  Its contents are never accessed directly by ``UTHelper``.
+- ``test_cases: list``
+  Mandatory. List of test cases, see below for definition.
+
+Test cases
+----------
+
+You write the test cases with five elements:
+
+- ``id: str``
+  Mandatory. Used to identify the test case.
+
+- ``flags: dict``
+  Optional. Flags controlling the behavior of the test case. All flags are optional. Accepted flags:
+
+  * ``check: bool``: set to ``true`` if the module is to be executed in **check mode**.
+  * ``diff: bool``: set to ``true`` if the module is to be executed in **diff mode**.
+  * ``skip: str``: set the test case to be skipped, providing the message for ``pytest.skip()``.
+  * ``xfail: str``: set the test case to expect failure, providing the message for ``pytest.xfail()``.
+
+- ``input: dict``
+  Optional. Parameters for the Ansible module, it can be empty.
+
+- ``output: dict``
+  Optional. Expected return values from the Ansible module.
+  All RV names used here are expected to be found in the module output, but not all RVs in the output must be here.
+  It can include special RVs such as ``changed`` and ``diff``.
+  It can be empty.
+
+- ``mocks: dict``
+  Optional. Mocked interactions, ``run_command`` being the only one supported for now.
+  Each key in this dictionary refers to one subclass of ``TestCaseMock`` and its
+  structure is dictated by the ``TestCaseMock`` subclass implementation.
+  All keys are expected to be named using snake case, as in ``run_command``. 
+ The ``TestCaseMock`` subclass is responsible for defining the name used in the test specification. + The structure for that specification is dependent on the implementing class. + See more details below for the implementation of ``RunCommandMock`` + +Example using YAML +------------------ + +We recommend you use ``UTHelper`` reading the test specifications from a YAML file. +See an example below of how one actually looks like (excerpt from ``test_opkg.yaml``): + +.. code-block:: yaml + + --- + anchors: + environ: &env-def {environ_update: {LANGUAGE: C, LC_ALL: C}, check_rc: false} + test_cases: + - id: install_zlibdev + input: + name: zlib-dev + state: present + output: + msg: installed 1 package(s) + mocks: + run_command: + - command: [/testbin/opkg, --version] + environ: *env-def + rc: 0 + out: '' + err: '' + - command: [/testbin/opkg, list-installed, zlib-dev] + environ: *env-def + rc: 0 + out: '' + err: '' + - command: [/testbin/opkg, install, zlib-dev] + environ: *env-def + rc: 0 + out: | + Installing zlib-dev (1.2.11-6) to root... + Downloading https://downloads.openwrt.org/releases/22.03.0/packages/mips_24kc/base/zlib-dev_1.2.11-6_mips_24kc.ipk + Installing zlib (1.2.11-6) to root... + Downloading https://downloads.openwrt.org/releases/22.03.0/packages/mips_24kc/base/zlib_1.2.11-6_mips_24kc.ipk + Configuring zlib. + Configuring zlib-dev. + err: '' + - command: [/testbin/opkg, list-installed, zlib-dev] + environ: *env-def + rc: 0 + out: | + zlib-dev - 1.2.11-6 + err: '' + - id: install_zlibdev_present + input: + name: zlib-dev + state: present + output: + msg: package(s) already present + mocks: + run_command: + - command: [/testbin/opkg, --version] + environ: *env-def + rc: 0 + out: '' + err: '' + - command: [/testbin/opkg, list-installed, zlib-dev] + environ: *env-def + rc: 0 + out: | + zlib-dev - 1.2.11-6 + err: '' + +TestCaseMocks Specifications +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The ``TestCaseMock`` subclass is free to define the expected data structure. 
+ +RunCommandMock Specification +"""""""""""""""""""""""""""" + +``RunCommandMock`` mocks can be specified with the key ``run_command`` and it expects a ``list`` in which elements follow the structure: + +- ``command: Union[list, str]`` + Mandatory. The command that is expected to be executed by the module. It corresponds to the parameter ``args`` of the ``AnsibleModule.run_command()`` call. + It can be either a list or a string, though the list form is generally recommended. +- ``environ: dict`` + Mandatory. All other parameters passed to the ``AnsibleModule.run_command()`` call. + Most commonly used are ``environ_update`` and ``check_rc``. + Must include all parameters the Ansible module uses in the ``AnsibleModule.run_command()`` call, otherwise the test will fail. +- ``rc: int`` + Mandatory. The return code for the command execution. + As per usual in bash scripting, a value of ``0`` means success, whereas any other number is an error code. +- ``out: str`` + Mandatory. The *stdout* result of the command execution, as one single string containing zero or more lines. +- ``err: str`` + Mandatory. The *stderr* result of the command execution, as one single string containing zero or more lines. + + +``UTHelper`` Reference +^^^^^^^^^^^^^^^^^^^^^^ + +.. py:module:: .uthelper + + .. py:class:: UTHelper + + A class to encapsulate unit tests. + + .. py:staticmethod:: from_spec(ansible_module, test_module, test_spec, mocks=None) + + Creates an ``UTHelper`` instance from a given test specification. + + :param ansible_module: The Ansible module to be tested. + :type ansible_module: :py:class:`types.ModuleType` + :param test_module: The test module. + :type test_module: :py:class:`types.ModuleType` + :param test_spec: The test specification. + :type test_spec: dict + :param mocks: List of ``TestCaseMocks`` to be used during testing. Currently only ``RunCommandMock`` exists. + :type mocks: list or None + :return: An ``UTHelper`` instance. 
+      :rtype: UTHelper
+
+   Example usage of ``from_spec()``:
+
+   .. code-block:: python
+
+      import sys
+
+      from ansible_collections.community.general.plugins.modules import ansible_module
+      from .uthelper import UTHelper, RunCommandMock
+
+      TEST_SPEC = dict(
+          test_cases=[
+              ...
+          ]
+      )
+
+      helper = UTHelper.from_spec(ansible_module, sys.modules[__name__], TEST_SPEC, mocks=[RunCommandMock])
+
+   .. py:staticmethod:: from_file(ansible_module, test_module, test_spec_filehandle, mocks=None)
+
+      Creates an ``UTHelper`` instance from a test specification file.
+
+      :param ansible_module: The Ansible module to be tested.
+      :type ansible_module: :py:class:`types.ModuleType`
+      :param test_module: The test module.
+      :type test_module: :py:class:`types.ModuleType`
+      :param test_spec_filehandle: A file-like object providing the test specification in YAML format.
+      :type test_spec_filehandle: ``file-like object``
+      :param mocks: List of ``TestCaseMocks`` to be used during testing. Currently only ``RunCommandMock`` exists.
+      :type mocks: list or None
+      :return: An ``UTHelper`` instance.
+      :rtype: UTHelper
+
+      Example usage of ``from_file()``:
+
+      .. code-block:: python
+
+         import sys
+
+         from ansible_collections.community.general.plugins.modules import ansible_module
+         from .uthelper import UTHelper, RunCommandMock
+
+         with open("test_spec.yaml", "r") as test_spec_filehandle:
+             helper = UTHelper.from_file(ansible_module, sys.modules[__name__], test_spec_filehandle, mocks=[RunCommandMock])
+
+   .. py:staticmethod:: from_module(ansible_module, test_module_name, mocks=None)
+
+      Creates an ``UTHelper`` instance from a given Ansible module and test module.
+
+      :param ansible_module: The Ansible module to be tested.
+      :type ansible_module: :py:class:`types.ModuleType`
+      :param test_module_name: The name of the test module. It works if passed ``__name__``.
+      :type test_module_name: str
+      :param mocks: List of ``TestCaseMocks`` to be used during testing. 
Currently only ``RunCommandMock`` exists.
+      :type mocks: list or None
+      :return: An ``UTHelper`` instance.
+      :rtype: UTHelper
+
+      Example usage of ``from_module()``:
+
+      .. code-block:: python
+
+         from ansible_collections.community.general.plugins.modules import ansible_module
+         from .uthelper import UTHelper, RunCommandMock
+
+         # Example usage
+         helper = UTHelper.from_module(ansible_module, __name__, mocks=[RunCommandMock])
+
+
+Creating TestCaseMocks
+^^^^^^^^^^^^^^^^^^^^^^
+
+To create a new ``TestCaseMock`` you must extend that class and implement the relevant parts:
+
+.. code-block:: python
+
+    class ShrubberyMock(TestCaseMock):
+        # this name is mandatory, it is the name used in the test specification
+        name = "shrubbery"
+
+        def setup(self, mocker):
+            # perform setup, commonly using mocker to patch some other piece of code
+            ...
+
+        def check(self, test_case, results):
+            # verify the test execution met the expectations of the test case
+            # for example the function was called as many times as it should
+            ...
+
+        def fixtures(self):
+            # returns a dict mapping names to pytest fixtures that should be used for the test case
+            # for example, in RunCommandMock it creates a fixture that patches AnsibleModule.get_bin_path
+            ...
+
+Caveats
+^^^^^^^
+
+Known issues/opportunities for improvement:
+
+* Only one ``UTHelper`` per test module: UTHelper injects a test function with a fixed name into the module's namespace,
+  so placing a second ``UTHelper`` instance is going to overwrite the function created by the first one.
+* Order of elements in module's namespace is not consistent across executions in Python 3.5, so adding more tests to the test module
+  might make UTHelper add its function before or after the other test functions.
+  In the community.general collection the CI process uses ``pytest-xdist`` to parallelize and distribute the tests,
+  and it requires the order of the tests to be consistent.
+
+.. 
versionadded:: 7.5.0 diff --git a/docs/docsite/rst/guide_vardict.rst b/docs/docsite/rst/guide_vardict.rst new file mode 100644 index 0000000000..1beef0c57f --- /dev/null +++ b/docs/docsite/rst/guide_vardict.rst @@ -0,0 +1,176 @@ +.. + Copyright (c) Ansible Project + GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) + SPDX-License-Identifier: GPL-3.0-or-later + +.. _ansible_collections.community.general.docsite.guide_vardict: + +VarDict Guide +============= + +Introduction +^^^^^^^^^^^^ + +The ``ansible_collections.community.general.plugins.module_utils.vardict`` module util provides the +``VarDict`` class to help manage the module variables. That class is a container for module variables, +especially the ones for which the module must keep track of state changes, and the ones that should +be published as return values. + +Each variable has extra behaviors controlled by associated metadata, simplifying the generation of +output values from the module. + +Quickstart +"""""""""" + +The simplest way of using ``VarDict`` is: + +.. code-block:: python + + from ansible_collections.community.general.plugins.module_utils.vardict import VarDict + +Then in ``main()``, or any other function called from there: + +.. code-block:: python + + vars = VarDict() + + # Next 3 statements are equivalent + vars.abc = 123 + vars["abc"] = 123 + vars.set("abc", 123) + + vars.xyz = "bananas" + vars.ghi = False + +And by the time the module is about to exit: + +.. code-block:: python + + results = vars.output() + module.exit_json(**results) + +That makes the return value of the module: + +.. code-block:: json + + { + "abc": 123, + "xyz": "bananas", + "ghi": false + } + +Metadata +"""""""" + +The metadata values associated with each variable are: + +- ``output: bool`` - marks the variable for module output as a module return value. +- ``fact: bool`` - marks the variable for module output as an Ansible fact. 
+- ``verbosity: int`` - sets the minimum level of verbosity for which the variable will be included in the output.
+- ``change: bool`` - controls the detection of changes in the variable value.
+- ``initial_value: any`` - when using ``change`` and you need to forcefully set an initial value to the variable.
+- ``diff: bool`` - used along with ``change``, this generates an Ansible-style diff ``dict``.
+
+See the sections below for more details on how to use the metadata.
+
+
+Using VarDict
+^^^^^^^^^^^^^
+
+Basic Usage
+"""""""""""
+
+As shown above, variables can be accessed using the ``[]`` operator, as in a ``dict`` object,
+and also as an object attribute, such as ``vars.abc``. The form using the ``set()``
+method is special in the sense that you can use it to set metadata values:
+
+.. code-block:: python
+
+    vars.set("abc", 123, output=False)
+    vars.set("abc", 123, output=True, change=True)
+
+Another way to set metadata after the variables have been created is:
+
+.. code-block:: python
+
+    vars.set_meta("abc", output=False)
+    vars.set_meta("abc", output=True, change=True, diff=True)
+
+You can use either operator and attribute forms to access the value of the variable. Other ways to
+access its value and its metadata are:
+
+.. code-block:: python
+
+    print("abc value = {0}".format(vars.var("abc")["value"]))  # get the value
+    print("abc output? {0}".format(vars.get_meta("abc")["output"]))  # get the metadata like this
+
+The names of methods, such as ``set``, ``get_meta``, ``output`` amongst others, are reserved and
+cannot be used as variable names. If you try to use a reserved name a ``ValueError`` exception
+is raised with the message "Name is reserved".
+
+Generating output
+"""""""""""""""""
+
+By default, every variable created will be enabled for output with minimum verbosity set to zero, in
+other words, they will always be in the output by default.
+
+You can control that when creating the variable for the first time or later in the code:
+
+.. 
code-block:: python + + vars.set("internal", x + 4, output=False) + vars.set_meta("internal", output=False) + +You can also set the verbosity of some variable, like: + +.. code-block:: python + + vars.set("abc", x + 4) + vars.set("debug_x", x, verbosity=3) + + results = vars.output(module._verbosity) + module.exit_json(**results) + +If the module was invoked with verbosity lower than 3, then the output will only contain +the variable ``abc``. If running at higher verbosity, as in ``ansible-playbook -vvv``, +then the output will also contain ``debug_x``. + +Generating facts is very similar to regular output, but variables are not marked as facts by default. + +.. code-block:: python + + vars.set("modulefact", x + 4, fact=True) + vars.set("debugfact", x, fact=True, verbosity=3) + + results = vars.output(module._verbosity) + results["ansible_facts"] = {"module_name": vars.facts(module._verbosity)} + module.exit_json(**results) + +Handling change +""""""""""""""" + +You can use ``VarDict`` to determine whether variables have had their values changed. + +.. code-block:: python + + vars.set("abc", 42, change=True) + vars.abc = 90 + + results = vars.output() + results["changed"] = vars.has_changed + module.exit_json(**results) + +If tracking changes in variables, you may want to present the difference between the initial and the final +values of it. For that, you want to use: + +.. code-block:: python + + vars.set("abc", 42, change=True, diff=True) + vars.abc = 90 + + results = vars.output() + results["changed"] = vars.has_changed + results["diff"] = vars.diff() + module.exit_json(**results) + +.. versionadded:: 7.1.0 diff --git a/docs/docsite/rst/test_guide.rst b/docs/docsite/rst/test_guide.rst index b0b7885f9b..a1f5723df4 100644 --- a/docs/docsite/rst/test_guide.rst +++ b/docs/docsite/rst/test_guide.rst @@ -8,14 +8,14 @@ community.general Test (Plugin) Guide ===================================== -The :ref:`community.general collection ` offers currently one test plugin. 
+The :anscollection:`community.general collection ` offers currently one test plugin. .. contents:: Topics Feature Tests ------------- -The ``a_module`` test allows to check whether a given string refers to an existing module or action plugin. This can be useful in roles, which can use this to ensure that required modules are present ahead of time. +The :ansplugin:`community.general.a_module test ` allows to check whether a given string refers to an existing module or action plugin. This can be useful in roles, which can use this to ensure that required modules are present ahead of time. .. code-block:: yaml+jinja diff --git a/galaxy.yml b/galaxy.yml index 09c85cf2b8..0288625dbb 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -5,17 +5,17 @@ namespace: community name: general -version: 6.3.0 +version: 12.0.0 readme: README.md authors: - Ansible (https://github.com/ansible) -description: null +description: >- + The community.general collection is a part of the Ansible package and includes many modules and + plugins supported by Ansible community which are not part of more specialized community collections. 
license_file: COPYING -tags: [community] -# NOTE: No dependencies are expected to be added here -# dependencies: +tags: + - community repository: https://github.com/ansible-collections/community.general documentation: https://docs.ansible.com/ansible/latest/collections/community/general/ homepage: https://github.com/ansible-collections/community.general issues: https://github.com/ansible-collections/community.general/issues -#type: flatmap diff --git a/meta/runtime.yml b/meta/runtime.yml index 98a46f62dc..d2be5a89c1 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -3,18 +3,132 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -requires_ansible: '>=2.11.0' +requires_ansible: '>=2.17.0' +action_groups: + consul: + - consul_agent_check + - consul_agent_service + - consul_auth_method + - consul_binding_rule + - consul_policy + - consul_role + - consul_session + - consul_token + proxmox: + - metadata: + extend_group: + - community.proxmox.proxmox + keycloak: + - keycloak_authentication + - keycloak_authentication_required_actions + - keycloak_authz_authorization_scope + - keycloak_authz_custom_policy + - keycloak_authz_permission + - keycloak_authz_permission_info + - keycloak_client + - keycloak_client_rolemapping + - keycloak_client_rolescope + - keycloak_clientscope + - keycloak_clientscope_type + - keycloak_clientsecret_info + - keycloak_clientsecret_regenerate + - keycloak_clienttemplate + - keycloak_component + - keycloak_component_info + - keycloak_group + - keycloak_identity_provider + - keycloak_realm + - keycloak_realm_key + - keycloak_realm_keys_metadata_info + - keycloak_realm_rolemapping + - keycloak_role + - keycloak_user + - keycloak_user_federation + - keycloak_user_rolemapping + - keycloak_userprofile + scaleway: + - scaleway_compute + - scaleway_compute_private_network + - scaleway_container + - scaleway_container_info + - 
scaleway_container_namespace + - scaleway_container_namespace_info + - scaleway_container_registry + - scaleway_container_registry_info + - scaleway_database_backup + - scaleway_function + - scaleway_function_info + - scaleway_function_namespace + - scaleway_function_namespace_info + - scaleway_image_info + - scaleway_ip + - scaleway_ip_info + - scaleway_lb + - scaleway_organization_info + - scaleway_private_network + - scaleway_security_group + - scaleway_security_group_info + - scaleway_security_group_rule + - scaleway_server_info + - scaleway_snapshot_info + - scaleway_sshkey + - scaleway_user_data + - scaleway_volume + - scaleway_volume_info + plugin_routing: + callback: + actionable: + tombstone: + removal_version: 2.0.0 + warning_text: Use the 'default' callback plugin with 'display_skipped_hosts + = no' and 'display_ok_hosts = no' options. + full_skip: + tombstone: + removal_version: 2.0.0 + warning_text: Use the 'default' callback plugin with 'display_skipped_hosts + = no' option. + hipchat: + tombstone: + removal_version: 10.0.0 + warning_text: The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020. + osx_say: + redirect: community.general.say + stderr: + tombstone: + removal_version: 2.0.0 + warning_text: Use the 'default' callback plugin with 'display_failed_stderr + = yes' option. + yaml: + tombstone: + removal_version: 12.0.0 + warning_text: >- + The plugin has been superseded by the option `result_format=yaml` in callback plugin ansible.builtin.default from ansible-core 2.13 onwards. connection: docker: redirect: community.docker.docker oc: redirect: community.okd.oc + proxmox_pct_remote: + redirect: community.proxmox.proxmox_pct_remote + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. 
lookup: gcp_storage_file: redirect: community.google.gcp_storage_file hashi_vault: redirect: community.hashi_vault.hashi_vault + hiera: + deprecation: + removal_version: 13.0.0 + warning_text: >- + Hiera has been deprecated a long time ago. + If you disagree with this deprecation, please create an issue in the community.general repository. + manifold: + tombstone: + removal_version: 11.0.0 + warning_text: Company was acquired in 2021 and service was ceased afterwards. nios: redirect: infoblox.nios_modules.nios_lookup nios_next_ip: @@ -22,537 +136,72 @@ plugin_routing: nios_next_network: redirect: infoblox.nios_modules.nios_next_network modules: - database.aerospike.aerospike_migrations: - redirect: community.general.aerospike_migrations - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.aerospike_migrations - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.airbrake_deployment: - redirect: community.general.airbrake_deployment - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.airbrake_deployment - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.aix_devices: - redirect: community.general.aix_devices - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.aix_devices - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.aix_filesystem: - redirect: community.general.aix_filesystem - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.aix_filesystem - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- system.aix_inittab: - redirect: community.general.aix_inittab - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.aix_inittab - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.aix_lvg: - redirect: community.general.aix_lvg - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.aix_lvg - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.aix_lvol: - redirect: community.general.aix_lvol - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.aix_lvol - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.alerta_customer: - redirect: community.general.alerta_customer - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.alerta_customer - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.alicloud.ali_instance: - redirect: community.general.ali_instance - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ali_instance - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. ali_instance_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.ali_instance_info instead. - cloud.alicloud.ali_instance_info: - redirect: community.general.ali_instance_info + atomic_container: deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ali_instance_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- system.alternatives: - redirect: community.general.alternatives + removal_version: 13.0.0 + warning_text: Project Atomic was sunset by the end of 2019. + atomic_host: deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.alternatives - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.language.ansible_galaxy_install: - redirect: community.general.ansible_galaxy_install + removal_version: 13.0.0 + warning_text: Project Atomic was sunset by the end of 2019. + atomic_image: deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ansible_galaxy_install - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.apache2_mod_proxy: - redirect: community.general.apache2_mod_proxy + removal_version: 13.0.0 + warning_text: Project Atomic was sunset by the end of 2019. + bearychat: + tombstone: + removal_version: 12.0.0 + warning_text: Chat service is no longer available. + catapult: deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.apache2_mod_proxy - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.apache2_module: - redirect: community.general.apache2_module - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.apache2_module - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.apk: - redirect: community.general.apk - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.apk - modules. 
This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.apt_repo: - redirect: community.general.apt_repo - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.apt_repo - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.apt_rpm: - redirect: community.general.apt_rpm - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.apt_rpm - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - files.archive: - redirect: community.general.archive - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.archive - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.atomic.atomic_container: - redirect: community.general.atomic_container - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.atomic_container - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.atomic.atomic_host: - redirect: community.general.atomic_host - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.atomic_host - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.atomic.atomic_image: - redirect: community.general.atomic_image - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.atomic_image - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- system.awall: - redirect: community.general.awall - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.awall - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.beadm: - redirect: community.general.beadm - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.beadm - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - notification.bearychat: - redirect: community.general.bearychat - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.bearychat - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.bigpanda: - redirect: community.general.bigpanda - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.bigpanda - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - source_control.bitbucket.bitbucket_access_key: - redirect: community.general.bitbucket_access_key - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.bitbucket_access_key - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - source_control.bitbucket.bitbucket_pipeline_key_pair: - redirect: community.general.bitbucket_pipeline_key_pair - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.bitbucket_pipeline_key_pair - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- source_control.bitbucket.bitbucket_pipeline_known_host: - redirect: community.general.bitbucket_pipeline_known_host - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.bitbucket_pipeline_known_host - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - source_control.bitbucket.bitbucket_pipeline_variable: - redirect: community.general.bitbucket_pipeline_variable - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.bitbucket_pipeline_variable - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.language.bower: - redirect: community.general.bower - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.bower - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.language.bundler: - redirect: community.general.bundler - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.bundler - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - source_control.bzr: - redirect: community.general.bzr - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.bzr - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - notification.campfire: - redirect: community.general.campfire - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.campfire - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- system.capabilities: - redirect: community.general.capabilities - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.capabilities - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.language.cargo: - redirect: community.general.cargo - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.cargo - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - notification.catapult: - redirect: community.general.catapult - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.catapult - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.circonus_annotation: - redirect: community.general.circonus_annotation - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.circonus_annotation - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. + removal_version: 13.0.0 + warning_text: DNS fails to resolve the API endpoint used by the module since Oct 2024. See https://github.com/ansible-collections/community.general/issues/10318 for details. cisco_spark: redirect: community.general.cisco_webex - notification.cisco_spark: - redirect: community.general.cisco_webex - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.cisco_webex - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- notification.cisco_webex: - redirect: community.general.cisco_webex - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.cisco_webex - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.centurylink.clc_aa_policy: - redirect: community.general.clc_aa_policy - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.clc_aa_policy - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.centurylink.clc_alert_policy: - redirect: community.general.clc_alert_policy - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.clc_alert_policy - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.centurylink.clc_blueprint_package: - redirect: community.general.clc_blueprint_package - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.clc_blueprint_package - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.centurylink.clc_firewall_policy: - redirect: community.general.clc_firewall_policy - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.clc_firewall_policy - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.centurylink.clc_group: - redirect: community.general.clc_group - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.clc_group - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- cloud.centurylink.clc_loadbalancer: - redirect: community.general.clc_loadbalancer - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.clc_loadbalancer - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.centurylink.clc_modify_server: - redirect: community.general.clc_modify_server - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.clc_modify_server - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.centurylink.clc_publicip: - redirect: community.general.clc_publicip - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.clc_publicip - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.centurylink.clc_server: - redirect: community.general.clc_server - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.clc_server - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.centurylink.clc_server_snapshot: - redirect: community.general.clc_server_snapshot - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.clc_server_snapshot - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.misc.cloud_init_data_facts: - redirect: community.general.cloud_init_data_facts - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.cloud_init_data_facts - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- net_tools.cloudflare_dns: - redirect: community.general.cloudflare_dns - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.cloudflare_dns - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.cobbler.cobbler_sync: - redirect: community.general.cobbler_sync - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.cobbler_sync - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.cobbler.cobbler_system: - redirect: community.general.cobbler_system - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.cobbler_system - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.language.composer: - redirect: community.general.composer - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.composer - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - clustering.consul.consul: - redirect: community.general.consul - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.consul - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - clustering.consul.consul_acl: - redirect: community.general.consul_acl - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.consul_acl - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- clustering.consul.consul_kv: - redirect: community.general.consul_kv - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.consul_kv - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - clustering.consul.consul_session: - redirect: community.general.consul_session - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.consul_session - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.copr: - redirect: community.general.copr - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.copr - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.language.cpanm: - redirect: community.general.cpanm - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.cpanm - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.cronvar: - redirect: community.general.cronvar - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.cronvar - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.crypttab: - redirect: community.general.crypttab - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.crypttab - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- monitoring.datadog.datadog_downtime: - redirect: community.general.datadog_downtime - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.datadog_downtime - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.datadog.datadog_event: - redirect: community.general.datadog_event - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.datadog_event - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.datadog.datadog_monitor: - redirect: community.general.datadog_monitor - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.datadog_monitor - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.dconf: - redirect: community.general.dconf - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.dconf - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.deploy_helper: - redirect: community.general.deploy_helper - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.deploy_helper - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.dimensiondata.dimensiondata_network: - redirect: community.general.dimensiondata_network - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.dimensiondata_network - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- cloud.dimensiondata.dimensiondata_vlan: - redirect: community.general.dimensiondata_vlan - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.dimensiondata_vlan - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - notification.discord: - redirect: community.general.discord - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.discord - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.django_manage: - redirect: community.general.django_manage - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.django_manage - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.dnf_versionlock: - redirect: community.general.dnf_versionlock - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.dnf_versionlock - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - net_tools.dnsimple: - redirect: community.general.dnsimple - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.dnsimple - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - net_tools.dnsimple_info: - redirect: community.general.dnsimple_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.dnsimple_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- net_tools.dnsmadeeasy: - redirect: community.general.dnsmadeeasy - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.dnsmadeeasy - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. + clc_alert_policy: + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. + clc_blueprint_package: + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. + clc_firewall_policy: + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. + clc_group: + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. + clc_loadbalancer: + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. + clc_modify_server: + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. + clc_publicip: + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. + clc_server: + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. + clc_server_snapshot: + tombstone: + removal_version: 11.0.0 + warning_text: CenturyLink Cloud services went EOL in September 2023. + consul_acl: + tombstone: + removal_version: 10.0.0 + warning_text: Use community.general.consul_token and/or community.general.consul_policy instead. 
docker_compose: redirect: community.docker.docker_compose docker_config: @@ -607,101 +256,19 @@ plugin_routing: redirect: community.docker.docker_volume docker_volume_info: redirect: community.docker.docker_volume_info - system.dpkg_divert: - redirect: community.general.dpkg_divert - deprecation: + facter: + tombstone: + removal_version: 12.0.0 + warning_text: Use community.general.facter_facts instead. + flowdock: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.dpkg_divert - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.language.easy_install: - redirect: community.general.easy_install - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.easy_install - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.ejabberd_user: - redirect: community.general.ejabberd_user - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ejabberd_user - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - database.misc.elasticsearch_plugin: - redirect: community.general.elasticsearch_plugin - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.elasticsearch_plugin - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - storage.emc.emc_vnx_sg_member: - redirect: community.general.emc_vnx_sg_member - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.emc_vnx_sg_member - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- clustering.etcd3: - redirect: community.general.etcd3 - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.etcd3 - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.facter: - redirect: community.general.facter - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.facter - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - files.filesize: - redirect: community.general.filesize - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.filesize - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.filesystem: - redirect: community.general.filesystem - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.filesystem - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.flatpak: - redirect: community.general.flatpak - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.flatpak - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.flatpak_remote: - redirect: community.general.flatpak_remote - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.flatpak_remote - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - notification.flowdock: - redirect: community.general.flowdock - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.flowdock - modules. 
This has never been supported or documented, and will stop working - in community.general 9.0.0. + warning_text: This module relied on HTTPS APIs that do not exist anymore and + there is no clear path to update. foreman: tombstone: removal_version: 2.0.0 warning_text: Use the modules from the theforeman.foreman collection instead. - net_tools.gandi_livedns: - redirect: community.general.gandi_livedns - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.gandi_livedns - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. gc_storage: redirect: community.google.gc_storage gcdns_record: @@ -736,20 +303,6 @@ plugin_routing: redirect: community.google.gce_snapshot gce_tag: redirect: community.google.gce_tag - system.gconftool2: - redirect: community.general.gconftool2 - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.gconftool2 - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.gconftool2_info: - redirect: community.general.gconftool2_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.gconftool2_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. gcp_backend_service: tombstone: removal_version: 2.0.0 @@ -785,193 +338,13 @@ plugin_routing: removal_version: 2.0.0 warning_text: Use google.cloud.gcp_spanner_database and/or google.cloud.gcp_spanner_instance instead. - packaging.language.gem: - redirect: community.general.gem - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.gem - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- source_control.git_config: - redirect: community.general.git_config - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.git_config - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - source_control.github.github_deploy_key: - redirect: community.general.github_deploy_key - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.github_deploy_key - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. github_hooks: tombstone: removal_version: 2.0.0 warning_text: Use community.general.github_webhook and community.general.github_webhook_info instead. - source_control.github.github_issue: - redirect: community.general.github_issue - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.github_issue - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - source_control.github.github_key: - redirect: community.general.github_key - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.github_key - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - source_control.github.github_release: - redirect: community.general.github_release - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.github_release - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - source_control.github.github_repo: - redirect: community.general.github_repo - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.github_repo - modules. 
This has never been supported or documented, and will stop working - in community.general 9.0.0. - source_control.github.github_webhook: - redirect: community.general.github_webhook - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.github_webhook - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - source_control.github.github_webhook_info: - redirect: community.general.github_webhook_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.github_webhook_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - source_control.gitlab.gitlab_branch: - redirect: community.general.gitlab_branch - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.gitlab_branch - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - source_control.gitlab.gitlab_deploy_key: - redirect: community.general.gitlab_deploy_key - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.gitlab_deploy_key - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - source_control.gitlab.gitlab_group: - redirect: community.general.gitlab_group - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.gitlab_group - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - source_control.gitlab.gitlab_group_members: - redirect: community.general.gitlab_group_members - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.gitlab_group_members - modules. 
This has never been supported or documented, and will stop working - in community.general 9.0.0. - source_control.gitlab.gitlab_group_variable: - redirect: community.general.gitlab_group_variable - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.gitlab_group_variable - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - source_control.gitlab.gitlab_hook: - redirect: community.general.gitlab_hook - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.gitlab_hook - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - source_control.gitlab.gitlab_project: - redirect: community.general.gitlab_project - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.gitlab_project - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - source_control.gitlab.gitlab_project_members: - redirect: community.general.gitlab_project_members - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.gitlab_project_members - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - source_control.gitlab.gitlab_project_variable: - redirect: community.general.gitlab_project_variable - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.gitlab_project_variable - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- source_control.gitlab.gitlab_protected_branch: - redirect: community.general.gitlab_protected_branch - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.gitlab_protected_branch - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - source_control.gitlab.gitlab_runner: - redirect: community.general.gitlab_runner - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.gitlab_runner - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - source_control.gitlab.gitlab_user: - redirect: community.general.gitlab_user - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.gitlab_user - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - notification.grove: - redirect: community.general.grove - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.grove - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.gunicorn: - redirect: community.general.gunicorn - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.gunicorn - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - database.saphana.hana_query: - redirect: community.general.hana_query - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.hana_query - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- net_tools.haproxy: - redirect: community.general.haproxy - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.haproxy - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.heroku.heroku_collaborator: - redirect: community.general.heroku_collaborator - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.heroku_collaborator - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. + hana_query: + redirect: community.sap_libs.sap_hdbsql hetzner_failover_ip: redirect: community.hrobot.failover_ip hetzner_failover_ip_info: @@ -980,740 +353,30 @@ plugin_routing: redirect: community.hrobot.firewall hetzner_firewall_info: redirect: community.hrobot.firewall_info - source_control.hg: - redirect: community.general.hg - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.hg - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - notification.hipchat: - redirect: community.general.hipchat - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.hipchat - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.homebrew: - redirect: community.general.homebrew - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.homebrew - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- packaging.os.homebrew_cask: - redirect: community.general.homebrew_cask - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.homebrew_cask - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.homebrew_tap: - redirect: community.general.homebrew_tap - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.homebrew_tap - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.homectl: - redirect: community.general.homectl - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.homectl - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.honeybadger_deployment: - redirect: community.general.honeybadger_deployment - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.honeybadger_deployment - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.hpilo.hpilo_boot: - redirect: community.general.hpilo_boot - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.hpilo_boot - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. + hipchat: + tombstone: + removal_version: 11.0.0 + warning_text: The hipchat service has been discontinued and the self-hosted variant has been End of Life since 2020. hpilo_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.hpilo_info instead. 
- remote_management.hpilo.hpilo_info: - redirect: community.general.hpilo_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.hpilo_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.hpilo.hponcfg: - redirect: community.general.hponcfg - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.hponcfg - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.htpasswd: - redirect: community.general.htpasswd - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.htpasswd - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.huawei.hwc_ecs_instance: - redirect: community.general.hwc_ecs_instance - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.hwc_ecs_instance - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.huawei.hwc_evs_disk: - redirect: community.general.hwc_evs_disk - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.hwc_evs_disk - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.huawei.hwc_network_vpc: - redirect: community.general.hwc_network_vpc - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.hwc_network_vpc - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- cloud.huawei.hwc_smn_topic: - redirect: community.general.hwc_smn_topic - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.hwc_smn_topic - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.huawei.hwc_vpc_eip: - redirect: community.general.hwc_vpc_eip - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.hwc_vpc_eip - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.huawei.hwc_vpc_peering_connect: - redirect: community.general.hwc_vpc_peering_connect - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.hwc_vpc_peering_connect - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.huawei.hwc_vpc_port: - redirect: community.general.hwc_vpc_port - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.hwc_vpc_port - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.huawei.hwc_vpc_private_ip: - redirect: community.general.hwc_vpc_private_ip - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.hwc_vpc_private_ip - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.huawei.hwc_vpc_route: - redirect: community.general.hwc_vpc_route - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.hwc_vpc_route - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- cloud.huawei.hwc_vpc_security_group: - redirect: community.general.hwc_vpc_security_group - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.hwc_vpc_security_group - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.huawei.hwc_vpc_security_group_rule: - redirect: community.general.hwc_vpc_security_group_rule - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.hwc_vpc_security_group_rule - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.huawei.hwc_vpc_subnet: - redirect: community.general.hwc_vpc_subnet - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.hwc_vpc_subnet - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - storage.ibm.ibm_sa_domain: - redirect: community.general.ibm_sa_domain - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ibm_sa_domain - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - storage.ibm.ibm_sa_host: - redirect: community.general.ibm_sa_host - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ibm_sa_host - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - storage.ibm.ibm_sa_host_ports: - redirect: community.general.ibm_sa_host_ports - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ibm_sa_host_ports - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- storage.ibm.ibm_sa_pool: - redirect: community.general.ibm_sa_pool - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ibm_sa_pool - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - storage.ibm.ibm_sa_vol: - redirect: community.general.ibm_sa_vol - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ibm_sa_vol - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - storage.ibm.ibm_sa_vol_map: - redirect: community.general.ibm_sa_vol_map - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ibm_sa_vol_map - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.icinga2_feature: - redirect: community.general.icinga2_feature - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.icinga2_feature - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.icinga2_host: - redirect: community.general.icinga2_host - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.icinga2_host - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. idrac_firmware: redirect: dellemc.openmanage.idrac_firmware - remote_management.redfish.idrac_redfish_command: - redirect: community.general.idrac_redfish_command - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.idrac_redfish_command - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- remote_management.redfish.idrac_redfish_config: - redirect: community.general.idrac_redfish_config - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.idrac_redfish_config - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. idrac_redfish_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.idrac_redfish_info instead. - remote_management.redfish.idrac_redfish_info: - redirect: community.general.idrac_redfish_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.idrac_redfish_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. idrac_server_config_profile: redirect: dellemc.openmanage.idrac_server_config_profile - remote_management.redfish.ilo_redfish_config: - redirect: community.general.ilo_redfish_config - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ilo_redfish_config - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.redfish.ilo_redfish_info: - redirect: community.general.ilo_redfish_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ilo_redfish_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.imc.imc_rest: - redirect: community.general.imc_rest - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.imc_rest - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- cloud.smartos.imgadm: - redirect: community.general.imgadm - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.imgadm - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - net_tools.infinity.infinity: - redirect: community.general.infinity - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.infinity - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - database.influxdb.influxdb_database: - redirect: community.general.influxdb_database - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.influxdb_database - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - database.influxdb.influxdb_query: - redirect: community.general.influxdb_query - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.influxdb_query - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - database.influxdb.influxdb_retention_policy: - redirect: community.general.influxdb_retention_policy - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.influxdb_retention_policy - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - database.influxdb.influxdb_user: - redirect: community.general.influxdb_user - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.influxdb_user - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- database.influxdb.influxdb_write: - redirect: community.general.influxdb_write - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.influxdb_write - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - files.ini_file: - redirect: community.general.ini_file - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ini_file - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.installp: - redirect: community.general.installp - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.installp - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.interfaces_file: - redirect: community.general.interfaces_file - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.interfaces_file - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - net_tools.ip_netns: - redirect: community.general.ip_netns - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ip_netns - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - identity.ipa.ipa_config: - redirect: community.general.ipa_config - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ipa_config - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- identity.ipa.ipa_dnsrecord: - redirect: community.general.ipa_dnsrecord - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ipa_dnsrecord - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - identity.ipa.ipa_dnszone: - redirect: community.general.ipa_dnszone - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ipa_dnszone - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - identity.ipa.ipa_group: - redirect: community.general.ipa_group - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ipa_group - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - identity.ipa.ipa_hbacrule: - redirect: community.general.ipa_hbacrule - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ipa_hbacrule - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - identity.ipa.ipa_host: - redirect: community.general.ipa_host - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ipa_host - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - identity.ipa.ipa_hostgroup: - redirect: community.general.ipa_hostgroup - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ipa_hostgroup - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- identity.ipa.ipa_otpconfig: - redirect: community.general.ipa_otpconfig - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ipa_otpconfig - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - identity.ipa.ipa_otptoken: - redirect: community.general.ipa_otptoken - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ipa_otptoken - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - identity.ipa.ipa_pwpolicy: - redirect: community.general.ipa_pwpolicy - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ipa_pwpolicy - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - identity.ipa.ipa_role: - redirect: community.general.ipa_role - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ipa_role - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - identity.ipa.ipa_service: - redirect: community.general.ipa_service - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ipa_service - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - identity.ipa.ipa_subca: - redirect: community.general.ipa_subca - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ipa_subca - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- identity.ipa.ipa_sudocmd: - redirect: community.general.ipa_sudocmd - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ipa_sudocmd - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - identity.ipa.ipa_sudocmdgroup: - redirect: community.general.ipa_sudocmdgroup - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ipa_sudocmdgroup - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - identity.ipa.ipa_sudorule: - redirect: community.general.ipa_sudorule - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ipa_sudorule - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - identity.ipa.ipa_user: - redirect: community.general.ipa_user - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ipa_user - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - identity.ipa.ipa_vault: - redirect: community.general.ipa_vault - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ipa_vault - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - net_tools.ipify_facts: - redirect: community.general.ipify_facts - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ipify_facts - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- net_tools.ipinfoio_facts: - redirect: community.general.ipinfoio_facts - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ipinfoio_facts - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.ipmi.ipmi_boot: - redirect: community.general.ipmi_boot - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ipmi_boot - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.ipmi.ipmi_power: - redirect: community.general.ipmi_power - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ipmi_power - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.iptables_state: - redirect: community.general.iptables_state - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.iptables_state - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - net_tools.ipwcli_dns: - redirect: community.general.ipwcli_dns - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ipwcli_dns - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - notification.irc: - redirect: community.general.irc - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.irc - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- files.iso_create: - redirect: community.general.iso_create - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.iso_create - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - files.iso_extract: - redirect: community.general.iso_extract - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.iso_extract - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - files.iso_customize: - redirect: community.general.iso_customize - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.iso_customize - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - notification.jabber: - redirect: community.general.jabber - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.jabber - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.java_cert: - redirect: community.general.java_cert - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.java_cert - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.java_keystore: - redirect: community.general.java_keystore - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.java_keystore - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- web_infrastructure.jboss: - redirect: community.general.jboss - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.jboss - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.jenkins_build: - redirect: community.general.jenkins_build - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.jenkins_build - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.jenkins_job: - redirect: community.general.jenkins_job - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.jenkins_job - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. jenkins_job_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.jenkins_job_info instead. - web_infrastructure.jenkins_job_info: - redirect: community.general.jenkins_job_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.jenkins_job_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.jenkins_plugin: - redirect: community.general.jenkins_plugin - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.jenkins_plugin - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.jenkins_script: - redirect: community.general.jenkins_script - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.jenkins_script - modules. 
This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.jira: - redirect: community.general.jira - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.jira - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. katello: tombstone: removal_version: 2.0.0 warning_text: Use the modules from the theforeman.foreman collection instead. - system.kernel_blacklist: - redirect: community.general.kernel_blacklist - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.kernel_blacklist - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - identity.keycloak.keycloak_authentication: - redirect: community.general.keycloak_authentication - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.keycloak_authentication - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - identity.keycloak.keycloak_client: - redirect: community.general.keycloak_client - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.keycloak_client - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - identity.keycloak.keycloak_client_rolemapping: - redirect: community.general.keycloak_client_rolemapping - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.keycloak_client_rolemapping - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- identity.keycloak.keycloak_clientscope: - redirect: community.general.keycloak_clientscope - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.keycloak_clientscope - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - identity.keycloak.keycloak_clienttemplate: - redirect: community.general.keycloak_clienttemplate - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.keycloak_clienttemplate - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - identity.keycloak.keycloak_group: - redirect: community.general.keycloak_group - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.keycloak_group - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - identity.keycloak.keycloak_identity_provider: - redirect: community.general.keycloak_identity_provider - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.keycloak_identity_provider - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - identity.keycloak.keycloak_realm: - redirect: community.general.keycloak_realm - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.keycloak_realm - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - identity.keycloak.keycloak_realm_info: - redirect: community.general.keycloak_realm_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.keycloak_realm_info - modules. 
This has never been supported or documented, and will stop working - in community.general 9.0.0. - identity.keycloak.keycloak_role: - redirect: community.general.keycloak_role - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.keycloak_role - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - identity.keycloak.keycloak_user_federation: - redirect: community.general.keycloak_user_federation - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.keycloak_user_federation - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - identity.keycloak.keycloak_user_rolemapping: - redirect: community.general.keycloak_user_rolemapping - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.keycloak_user_rolemapping - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.keyring: - redirect: community.general.keyring - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.keyring - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.keyring_info: - redirect: community.general.keyring_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.keyring_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - database.misc.kibana_plugin: - redirect: community.general.kibana_plugin - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.kibana_plugin - modules. 
This has never been supported or documented, and will stop working - in community.general 9.0.0. kubevirt_cdi_upload: redirect: community.kubevirt.kubevirt_cdi_upload kubevirt_preset: @@ -1726,115 +389,10 @@ plugin_routing: redirect: community.kubevirt.kubevirt_template kubevirt_vm: redirect: community.kubevirt.kubevirt_vm - system.launchd: - redirect: community.general.launchd - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.launchd - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.layman: - redirect: community.general.layman - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.layman - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.lbu: - redirect: community.general.lbu - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.lbu - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. ldap_attr: tombstone: removal_version: 3.0.0 warning_text: Use community.general.ldap_attrs instead. - net_tools.ldap.ldap_attrs: - redirect: community.general.ldap_attrs - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ldap_attrs - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - net_tools.ldap.ldap_entry: - redirect: community.general.ldap_entry - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ldap_entry - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- net_tools.ldap.ldap_passwd: - redirect: community.general.ldap_passwd - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ldap_passwd - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - net_tools.ldap.ldap_search: - redirect: community.general.ldap_search - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ldap_search - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.librato_annotation: - redirect: community.general.librato_annotation - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.librato_annotation - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.linode.linode: - redirect: community.general.linode - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.linode - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.linode.linode_v4: - redirect: community.general.linode_v4 - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.linode_v4 - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.listen_ports_facts: - redirect: community.general.listen_ports_facts - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.listen_ports_facts - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- net_tools.lldp: - redirect: community.general.lldp - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.lldp - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.locale_gen: - redirect: community.general.locale_gen - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.locale_gen - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.logentries: - redirect: community.general.logentries - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.logentries - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - notification.logentries_msg: - redirect: community.general.logentries_msg - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.logentries_msg - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. logicmonitor: tombstone: removal_version: 1.0.0 @@ -1845,280 +403,14 @@ plugin_routing: removal_version: 1.0.0 warning_text: The logicmonitor_facts module is no longer maintained and the API used has been disabled in 2017. - monitoring.logstash_plugin: - redirect: community.general.logstash_plugin - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.logstash_plugin - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.lvg: - redirect: community.general.lvg - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.lvg - modules. 
This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.lvol: - redirect: community.general.lvol - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.lvol - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.lxc.lxc_container: - redirect: community.general.lxc_container - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.lxc_container - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.lxca.lxca_cmms: - redirect: community.general.lxca_cmms - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.lxca_cmms - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.lxca.lxca_nodes: - redirect: community.general.lxca_nodes - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.lxca_nodes - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.lxd.lxd_container: - redirect: community.general.lxd_container - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.lxd_container - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.lxd.lxd_profile: - redirect: community.general.lxd_profile - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.lxd_profile - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- cloud.lxd.lxd_project: - redirect: community.general.lxd_project - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.lxd_project - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.macports: - redirect: community.general.macports - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.macports - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - notification.mail: - redirect: community.general.mail - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.mail - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.make: - redirect: community.general.make - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.make - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.manageiq.manageiq_alert_profiles: - redirect: community.general.manageiq_alert_profiles - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.manageiq_alert_profiles - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.manageiq.manageiq_alerts: - redirect: community.general.manageiq_alerts - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.manageiq_alerts - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- remote_management.manageiq.manageiq_group: - redirect: community.general.manageiq_group - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.manageiq_group - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.manageiq.manageiq_policies: - redirect: community.general.manageiq_policies - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.manageiq_policies - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.manageiq.manageiq_policies_info: - redirect: community.general.manageiq_policies_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.manageiq_policies_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.manageiq.manageiq_provider: - redirect: community.general.manageiq_provider - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.manageiq_provider - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.manageiq.manageiq_tags: - redirect: community.general.manageiq_tags - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.manageiq_tags - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.manageiq.manageiq_tags_info: - redirect: community.general.manageiq_tags_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.manageiq_tags_info - modules. 
This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.manageiq.manageiq_tenant: - redirect: community.general.manageiq_tenant - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.manageiq_tenant - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.manageiq.manageiq_user: - redirect: community.general.manageiq_user - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.manageiq_user - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.mas: - redirect: community.general.mas - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.mas - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - notification.matrix: - redirect: community.general.matrix - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.matrix - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - notification.mattermost: - redirect: community.general.mattermost - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.mattermost - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.language.maven_artifact: - redirect: community.general.maven_artifact - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.maven_artifact - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- cloud.memset.memset_dns_reload: - redirect: community.general.memset_dns_reload - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.memset_dns_reload - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. memset_memstore_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.memset_memstore_info instead. - cloud.memset.memset_memstore_info: - redirect: community.general.memset_memstore_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.memset_memstore_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. memset_server_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.memset_server_info instead. - cloud.memset.memset_server_info: - redirect: community.general.memset_server_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.memset_server_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.memset.memset_zone: - redirect: community.general.memset_zone - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.memset_zone - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.memset.memset_zone_domain: - redirect: community.general.memset_zone_domain - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.memset_zone_domain - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- cloud.memset.memset_zone_record: - redirect: community.general.memset_zone_record - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.memset_zone_record - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.mksysb: - redirect: community.general.mksysb - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.mksysb - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.modprobe: - redirect: community.general.modprobe - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.modprobe - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.monit: - redirect: community.general.monit - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.monit - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - notification.mqtt: - redirect: community.general.mqtt - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.mqtt - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - database.mssql.mssql_db: - redirect: community.general.mssql_db - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.mssql_db - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- database.mssql.mssql_script: - redirect: community.general.mssql_script - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.mssql_script - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. na_cdot_aggregate: tombstone: removal_version: 2.0.0 @@ -2155,52 +447,10 @@ plugin_routing: tombstone: removal_version: 3.0.0 warning_text: Use netapp.ontap.na_ontap_info instead. - monitoring.nagios: - redirect: community.general.nagios - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.nagios - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - net_tools.netcup_dns: - redirect: community.general.netcup_dns - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.netcup_dns - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.newrelic_deployment: - redirect: community.general.newrelic_deployment - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.newrelic_deployment - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - notification.nexmo: - redirect: community.general.nexmo - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.nexmo - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. nginx_status_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.nginx_status_info instead. 
- web_infrastructure.nginx_status_info: - redirect: community.general.nginx_status_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.nginx_status_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.smartos.nictagadm: - redirect: community.general.nictagadm - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.nictagadm - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. nios_a_record: redirect: infoblox.nios_modules.nios_a_record nios_aaaa_record: @@ -2233,400 +483,61 @@ plugin_routing: redirect: infoblox.nios_modules.nios_txt_record nios_zone: redirect: infoblox.nios_modules.nios_zone - net_tools.nmcli: - redirect: community.general.nmcli + oci_vcn: deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.nmcli - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - clustering.nomad.nomad_job: - redirect: community.general.nomad_job - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.nomad_job - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - clustering.nomad.nomad_job_info: - redirect: community.general.nomad_job_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.nomad_job_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.nosh: - redirect: community.general.nosh - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.nosh - modules. 
This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.language.npm: - redirect: community.general.npm - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.npm - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - net_tools.nsupdate: - redirect: community.general.nsupdate - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.nsupdate - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.oracle.oci_vcn: - redirect: community.general.oci_vcn - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.oci_vcn - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - database.misc.odbc: - redirect: community.general.odbc - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.odbc - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - notification.office_365_connector_card: - redirect: community.general.office_365_connector_card - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.office_365_connector_card - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.ohai: - redirect: community.general.ohai - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ohai - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- net_tools.omapi_host: - redirect: community.general.omapi_host - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.omapi_host - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. + removal_version: 13.0.0 + warning_text: Use oracle.oci.oci_network_vcn instead. ome_device_info: redirect: dellemc.openmanage.ome_device_info - cloud.opennebula.one_host: - redirect: community.general.one_host - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.one_host - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.opennebula.one_image: - redirect: community.general.one_image - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.one_image - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. one_image_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.one_image_info instead. - cloud.opennebula.one_image_info: - redirect: community.general.one_image_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.one_image_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.opennebula.one_service: - redirect: community.general.one_service - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.one_service - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- cloud.opennebula.one_template: - redirect: community.general.one_template - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.one_template - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.opennebula.one_vm: - redirect: community.general.one_vm - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.one_vm - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.oneandone.oneandone_firewall_policy: - redirect: community.general.oneandone_firewall_policy - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.oneandone_firewall_policy - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.oneandone.oneandone_load_balancer: - redirect: community.general.oneandone_load_balancer - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.oneandone_load_balancer - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.oneandone.oneandone_monitoring_policy: - redirect: community.general.oneandone_monitoring_policy - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.oneandone_monitoring_policy - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.oneandone.oneandone_private_network: - redirect: community.general.oneandone_private_network - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.oneandone_private_network - modules. 
This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.oneandone.oneandone_public_ip: - redirect: community.general.oneandone_public_ip - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.oneandone_public_ip - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.oneandone.oneandone_server: - redirect: community.general.oneandone_server - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.oneandone_server - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. onepassword_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.onepassword_info instead. - identity.onepassword_info: - redirect: community.general.onepassword_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.onepassword_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. oneview_datacenter_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.oneview_datacenter_info instead. - remote_management.oneview.oneview_datacenter_info: - redirect: community.general.oneview_datacenter_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.oneview_datacenter_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. oneview_enclosure_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.oneview_enclosure_info instead. 
- remote_management.oneview.oneview_enclosure_info: - redirect: community.general.oneview_enclosure_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.oneview_enclosure_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.oneview.oneview_ethernet_network: - redirect: community.general.oneview_ethernet_network - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.oneview_ethernet_network - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. oneview_ethernet_network_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.oneview_ethernet_network_info instead. - remote_management.oneview.oneview_ethernet_network_info: - redirect: community.general.oneview_ethernet_network_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.oneview_ethernet_network_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.oneview.oneview_fc_network: - redirect: community.general.oneview_fc_network - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.oneview_fc_network - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. oneview_fc_network_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.oneview_fc_network_info instead. - remote_management.oneview.oneview_fc_network_info: - redirect: community.general.oneview_fc_network_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.oneview_fc_network_info - modules. 
This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.oneview.oneview_fcoe_network: - redirect: community.general.oneview_fcoe_network - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.oneview_fcoe_network - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. oneview_fcoe_network_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.oneview_fcoe_network_info instead. - remote_management.oneview.oneview_fcoe_network_info: - redirect: community.general.oneview_fcoe_network_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.oneview_fcoe_network_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.oneview.oneview_logical_interconnect_group: - redirect: community.general.oneview_logical_interconnect_group - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.oneview_logical_interconnect_group - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. oneview_logical_interconnect_group_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.oneview_logical_interconnect_group_info instead. - remote_management.oneview.oneview_logical_interconnect_group_info: - redirect: community.general.oneview_logical_interconnect_group_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.oneview_logical_interconnect_group_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- remote_management.oneview.oneview_network_set: - redirect: community.general.oneview_network_set - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.oneview_network_set - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. oneview_network_set_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.oneview_network_set_info instead. - remote_management.oneview.oneview_network_set_info: - redirect: community.general.oneview_network_set_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.oneview_network_set_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.oneview.oneview_san_manager: - redirect: community.general.oneview_san_manager - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.oneview_san_manager - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. oneview_san_manager_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.oneview_san_manager_info instead. - remote_management.oneview.oneview_san_manager_info: - redirect: community.general.oneview_san_manager_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.oneview_san_manager_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. online_server_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.online_server_info instead. 
- cloud.online.online_server_info: - redirect: community.general.online_server_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.online_server_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. online_user_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.online_user_info instead. - cloud.online.online_user_info: - redirect: community.general.online_user_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.online_user_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.open_iscsi: - redirect: community.general.open_iscsi - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.open_iscsi - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.openbsd_pkg: - redirect: community.general.openbsd_pkg - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.openbsd_pkg - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - identity.opendj.opendj_backendprop: - redirect: community.general.opendj_backendprop - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.opendj_backendprop - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.openwrt_init: - redirect: community.general.openwrt_init - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.openwrt_init - modules. 
This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.opkg: - redirect: community.general.opkg - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.opkg - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.osx_defaults: - redirect: community.general.osx_defaults - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.osx_defaults - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.ovh.ovh_ip_failover: - redirect: community.general.ovh_ip_failover - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ovh_ip_failover - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.ovh.ovh_ip_loadbalancing_backend: - redirect: community.general.ovh_ip_loadbalancing_backend - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ovh_ip_loadbalancing_backend - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.ovh.ovh_monthly_billing: - redirect: community.general.ovh_monthly_billing - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ovh_monthly_billing - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. ovirt: tombstone: removal_version: 3.0.0 @@ -2727,216 +638,6 @@ plugin_routing: tombstone: removal_version: 3.0.0 warning_text: Use ovirt.ovirt.ovirt_vmpool_info instead. 
- clustering.pacemaker_cluster: - redirect: community.general.pacemaker_cluster - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pacemaker_cluster - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.packet.packet_device: - redirect: community.general.packet_device - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.packet_device - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.packet.packet_ip_subnet: - redirect: community.general.packet_ip_subnet - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.packet_ip_subnet - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.packet.packet_project: - redirect: community.general.packet_project - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.packet_project - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.packet.packet_sshkey: - redirect: community.general.packet_sshkey - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.packet_sshkey - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.packet.packet_volume: - redirect: community.general.packet_volume - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.packet_volume - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- cloud.packet.packet_volume_attachment: - redirect: community.general.packet_volume_attachment - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.packet_volume_attachment - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.pacman: - redirect: community.general.pacman - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pacman - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.pacman_key: - redirect: community.general.pacman_key - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pacman_key - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.pagerduty: - redirect: community.general.pagerduty - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pagerduty - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.pagerduty_alert: - redirect: community.general.pagerduty_alert - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pagerduty_alert - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.pagerduty_change: - redirect: community.general.pagerduty_change - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pagerduty_change - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- monitoring.pagerduty_user: - redirect: community.general.pagerduty_user - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pagerduty_user - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.pam_limits: - redirect: community.general.pam_limits - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pam_limits - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.pamd: - redirect: community.general.pamd - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pamd - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.parted: - redirect: community.general.parted - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.parted - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.language.pear: - redirect: community.general.pear - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pear - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.pids: - redirect: community.general.pids - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pids - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.pingdom: - redirect: community.general.pingdom - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pingdom - modules. 
This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.language.pip_package_info: - redirect: community.general.pip_package_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pip_package_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.language.pipx: - redirect: community.general.pipx - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pipx - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.language.pipx_info: - redirect: community.general.pipx_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pipx_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.pkg5: - redirect: community.general.pkg5 - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pkg5 - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.pkg5_publisher: - redirect: community.general.pkg5_publisher - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pkg5_publisher - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.pkgin: - redirect: community.general.pkgin - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pkgin - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- packaging.os.pkgng: - redirect: community.general.pkgng - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pkgng - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.pkgutil: - redirect: community.general.pkgutil - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pkgutil - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - storage.pmem.pmem: - redirect: community.general.pmem - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pmem - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.portage: - redirect: community.general.portage - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.portage - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.portinstall: - redirect: community.general.portinstall - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.portinstall - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. postgresql_copy: redirect: community.postgresql.postgresql_copy postgresql_db: @@ -2981,167 +682,116 @@ plugin_routing: redirect: community.postgresql.postgresql_user postgresql_user_obj_stat_info: redirect: community.postgresql.postgresql_user_obj_stat_info - net_tools.pritunl.pritunl_org: - redirect: community.general.pritunl_org + profitbricks: + tombstone: + removal_version: 11.0.0 + warning_text: Supporting library is unsupported since 2021. 
+ profitbricks_datacenter: + tombstone: + removal_version: 11.0.0 + warning_text: Supporting library is unsupported since 2021. + profitbricks_nic: + tombstone: + removal_version: 11.0.0 + warning_text: Supporting library is unsupported since 2021. + profitbricks_volume: + tombstone: + removal_version: 11.0.0 + warning_text: Supporting library is unsupported since 2021. + profitbricks_volume_attachments: + tombstone: + removal_version: 11.0.0 + warning_text: Supporting library is unsupported since 2021. + proxmox: + redirect: community.proxmox.proxmox deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pritunl_org - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - net_tools.pritunl.pritunl_org_info: - redirect: community.general.pritunl_org_info + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_backup: + redirect: community.proxmox.proxmox_backup deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pritunl_org_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - net_tools.pritunl.pritunl_user: - redirect: community.general.pritunl_user + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_backup_info: + redirect: community.proxmox.proxmox_backup_info deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pritunl_user - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - net_tools.pritunl.pritunl_user_info: - redirect: community.general.pritunl_user_info + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. 
+ proxmox_disk: + redirect: community.proxmox.proxmox_disk deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pritunl_user_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.profitbricks.profitbricks: - redirect: community.general.profitbricks + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_domain_info: + redirect: community.proxmox.proxmox_domain_info deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.profitbricks - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.profitbricks.profitbricks_datacenter: - redirect: community.general.profitbricks_datacenter + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_group_info: + redirect: community.proxmox.proxmox_group_info deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.profitbricks_datacenter - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.profitbricks.profitbricks_nic: - redirect: community.general.profitbricks_nic + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_kvm: + redirect: community.proxmox.proxmox_kvm deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.profitbricks_nic - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.profitbricks.profitbricks_volume: - redirect: community.general.profitbricks_volume + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. 
+ proxmox_nic: + redirect: community.proxmox.proxmox_nic deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.profitbricks_volume - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.profitbricks.profitbricks_volume_attachments: - redirect: community.general.profitbricks_volume_attachments + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_node_info: + redirect: community.proxmox.proxmox_node_info deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.profitbricks_volume_attachments - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.misc.proxmox: - redirect: community.general.proxmox + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_pool: + redirect: community.proxmox.proxmox_pool deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.proxmox - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.misc.proxmox_disk: - redirect: community.general.proxmox_disk + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_pool_member: + redirect: community.proxmox.proxmox_pool_member deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.proxmox_disk - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.misc.proxmox_domain_info: - redirect: community.general.proxmox_domain_info + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. 
+ proxmox_snap: + redirect: community.proxmox.proxmox_snap deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.proxmox_domain_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.misc.proxmox_group_info: - redirect: community.general.proxmox_group_info + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_storage_contents_info: + redirect: community.proxmox.proxmox_storage_contents_info deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.proxmox_group_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.misc.proxmox_kvm: - redirect: community.general.proxmox_kvm + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_storage_info: + redirect: community.proxmox.proxmox_storage_info deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.proxmox_kvm - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.misc.proxmox_nic: - redirect: community.general.proxmox_nic + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_tasks_info: + redirect: community.proxmox.proxmox_tasks_info deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.proxmox_nic - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.misc.proxmox_snap: - redirect: community.general.proxmox_snap + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. 
+ proxmox_template: + redirect: community.proxmox.proxmox_template deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.proxmox_snap - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.misc.proxmox_storage_info: - redirect: community.general.proxmox_storage_info + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_user_info: + redirect: community.proxmox.proxmox_user_info deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.proxmox_storage_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.misc.proxmox_tasks_info: - redirect: community.general.proxmox_tasks_info + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + proxmox_vm_info: + redirect: community.proxmox.proxmox_vm_info deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.proxmox_tasks_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.misc.proxmox_template: - redirect: community.general.proxmox_template - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.proxmox_template - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.misc.proxmox_user_info: - redirect: community.general.proxmox_user_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.proxmox_user_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- cloud.pubnub.pubnub_blocks: - redirect: community.general.pubnub_blocks - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pubnub_blocks - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.pulp_repo: - redirect: community.general.pulp_repo - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pulp_repo - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.puppet: - redirect: community.general.puppet - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.puppet - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. purefa_facts: tombstone: removal_version: 3.0.0 @@ -3150,665 +800,178 @@ plugin_routing: tombstone: removal_version: 3.0.0 warning_text: Use purestorage.flashblade.purefb_info instead. - notification.pushbullet: - redirect: community.general.pushbullet - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pushbullet - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - notification.pushover: - redirect: community.general.pushover - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.pushover - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. python_requirements_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.python_requirements_info instead. 
- system.python_requirements_info: - redirect: community.general.python_requirements_info - deprecation: + rax_cbs_attachments: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.python_requirements_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.rackspace.rax: - redirect: community.general.rax - deprecation: + warning_text: This module relied on the deprecated package pyrax. + rax_cbs: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rax - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.rackspace.rax_cbs: - redirect: community.general.rax_cbs - deprecation: + warning_text: This module relied on the deprecated package pyrax. + rax_cdb_database: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rax_cbs - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.rackspace.rax_cbs_attachments: - redirect: community.general.rax_cbs_attachments - deprecation: + warning_text: This module relied on the deprecated package pyrax. + rax_cdb_user: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rax_cbs_attachments - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.rackspace.rax_cdb: - redirect: community.general.rax_cdb - deprecation: + warning_text: This module relied on the deprecated package pyrax. + rax_cdb: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rax_cdb - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- cloud.rackspace.rax_cdb_database: - redirect: community.general.rax_cdb_database - deprecation: + warning_text: This module relied on the deprecated package pyrax. + rax_clb_nodes: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rax_cdb_database - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.rackspace.rax_cdb_user: - redirect: community.general.rax_cdb_user - deprecation: + warning_text: This module relied on the deprecated package pyrax. + rax_clb_ssl: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rax_cdb_user - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.rackspace.rax_clb: - redirect: community.general.rax_clb - deprecation: + warning_text: This module relied on the deprecated package pyrax. + rax_clb: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rax_clb - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.rackspace.rax_clb_nodes: - redirect: community.general.rax_clb_nodes - deprecation: + warning_text: This module relied on the deprecated package pyrax. + rax_dns_record: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rax_clb_nodes - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.rackspace.rax_clb_ssl: - redirect: community.general.rax_clb_ssl - deprecation: + warning_text: This module relied on the deprecated package pyrax. + rax_dns: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rax_clb_ssl - modules. 
This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.rackspace.rax_dns: - redirect: community.general.rax_dns - deprecation: + warning_text: This module relied on the deprecated package pyrax. + rax_facts: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rax_dns - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.rackspace.rax_dns_record: - redirect: community.general.rax_dns_record - deprecation: + warning_text: This module relied on the deprecated package pyrax. + rax_files_objects: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rax_dns_record - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.rackspace.rax_facts: - redirect: community.general.rax_facts - deprecation: + warning_text: This module relied on the deprecated package pyrax. + rax_files: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rax_facts - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.rackspace.rax_files: - redirect: community.general.rax_files - deprecation: + warning_text: This module relied on the deprecated package pyrax. + rax_identity: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rax_files - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.rackspace.rax_files_objects: - redirect: community.general.rax_files_objects - deprecation: + warning_text: This module relied on the deprecated package pyrax. 
+ rax_keypair: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rax_files_objects - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.rackspace.rax_identity: - redirect: community.general.rax_identity - deprecation: + warning_text: This module relied on the deprecated package pyrax. + rax_meta: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rax_identity - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.rackspace.rax_keypair: - redirect: community.general.rax_keypair - deprecation: + warning_text: This module relied on the deprecated package pyrax. + rax_mon_alarm: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rax_keypair - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.rackspace.rax_meta: - redirect: community.general.rax_meta - deprecation: + warning_text: This module relied on the deprecated package pyrax. + rax: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rax_meta - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.rackspace.rax_mon_alarm: - redirect: community.general.rax_mon_alarm - deprecation: + warning_text: This module relied on the deprecated package pyrax. + rax_mon_check: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rax_mon_alarm - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- cloud.rackspace.rax_mon_check: - redirect: community.general.rax_mon_check - deprecation: + warning_text: This module relied on the deprecated package pyrax. + rax_mon_entity: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rax_mon_check - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.rackspace.rax_mon_entity: - redirect: community.general.rax_mon_entity - deprecation: + warning_text: This module relied on the deprecated package pyrax. + rax_mon_notification_plan: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rax_mon_entity - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.rackspace.rax_mon_notification: - redirect: community.general.rax_mon_notification - deprecation: + warning_text: This module relied on the deprecated package pyrax. + rax_mon_notification: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rax_mon_notification - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.rackspace.rax_mon_notification_plan: - redirect: community.general.rax_mon_notification_plan - deprecation: + warning_text: This module relied on the deprecated package pyrax. + rax_network: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rax_mon_notification_plan - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.rackspace.rax_network: - redirect: community.general.rax_network - deprecation: + warning_text: This module relied on the deprecated package pyrax. 
+ rax_queue: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rax_network - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.rackspace.rax_queue: - redirect: community.general.rax_queue - deprecation: + warning_text: This module relied on the deprecated package pyrax. + rax_scaling_group: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rax_queue - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.rackspace.rax_scaling_group: - redirect: community.general.rax_scaling_group - deprecation: + warning_text: This module relied on the deprecated package pyrax. + rax_scaling_policy: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rax_scaling_group - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.rackspace.rax_scaling_policy: - redirect: community.general.rax_scaling_policy - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rax_scaling_policy - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - files.read_csv: - redirect: community.general.read_csv - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.read_csv - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.redfish.redfish_command: - redirect: community.general.redfish_command - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.redfish_command - modules. 
This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.redfish.redfish_config: - redirect: community.general.redfish_config - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.redfish_config - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. + warning_text: This module relied on the deprecated package pyrax. redfish_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.redfish_info instead. - remote_management.redfish.redfish_info: - redirect: community.general.redfish_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.redfish_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.redhat_subscription: - redirect: community.general.redhat_subscription - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.redhat_subscription - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - database.misc.redis: - redirect: community.general.redis - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.redis - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - database.misc.redis_data: - redirect: community.general.redis_data - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.redis_data - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- database.misc.redis_data_incr: - redirect: community.general.redis_data_incr - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.redis_data_incr - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - database.misc.redis_data_info: - redirect: community.general.redis_data_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.redis_data_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - database.misc.redis_info: - redirect: community.general.redis_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.redis_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.misc.rhevm: - redirect: community.general.rhevm - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rhevm - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.rhn_channel: - redirect: community.general.rhn_channel - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rhn_channel - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.rhn_register: - redirect: community.general.rhn_register - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rhn_register - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- packaging.os.rhsm_release: - redirect: community.general.rhsm_release - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rhsm_release - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.rhsm_repository: - redirect: community.general.rhsm_repository - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rhsm_repository - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - database.misc.riak: - redirect: community.general.riak - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.riak - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - notification.rocketchat: - redirect: community.general.rocketchat - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rocketchat - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.rollbar_deployment: - redirect: community.general.rollbar_deployment - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rollbar_deployment - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.rpm_ostree_pkg: - redirect: community.general.rpm_ostree_pkg - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rpm_ostree_pkg - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- web_infrastructure.rundeck_acl_policy: - redirect: community.general.rundeck_acl_policy - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rundeck_acl_policy - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.rundeck_job_executions_info: - redirect: community.general.rundeck_job_executions_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rundeck_job_executions_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.rundeck_job_run: - redirect: community.general.rundeck_job_run - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rundeck_job_run - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.rundeck_project: - redirect: community.general.rundeck_project - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.rundeck_project - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.runit: - redirect: community.general.runit - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.runit - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.sap_task_list_execute: - redirect: community.general.sap_task_list_execute - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.sap_task_list_execute - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- files.sapcar_extract: - redirect: community.general.sapcar_extract - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.sapcar_extract - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - notification.say: - redirect: community.general.say - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.say - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.scaleway.scaleway_compute: - redirect: community.general.scaleway_compute - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.scaleway_compute - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.scaleway.scaleway_compute_private_network: - redirect: community.general.scaleway_compute_private_network - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.scaleway_compute_private_network - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.scaleway.scaleway_container_registry: - redirect: community.general.scaleway_container_registry - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.scaleway_container_registry - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.scaleway.scaleway_container_registry_info: - redirect: community.general.scaleway_container_registry_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.scaleway_container_registry_info - modules. 
This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.scaleway.scaleway_database_backup: - redirect: community.general.scaleway_database_backup - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.scaleway_database_backup - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.scaleway.scaleway_function_namespace: - redirect: community.general.scaleway_function_namespace - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.scaleway_function_namespace - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.scaleway.scaleway_function_namespace_info: - redirect: community.general.scaleway_function_namespace_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.scaleway_function_namespace_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. + rhn_channel: + tombstone: + removal_version: 10.0.0 + warning_text: RHN is EOL. + rhn_register: + tombstone: + removal_version: 10.0.0 + warning_text: RHN is EOL. + sapcar_extract: + redirect: community.sap_libs.sapcar_extract + sap_task_list_execute: + redirect: community.sap_libs.sap_task_list_execute scaleway_image_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.scaleway_image_info instead. - cloud.scaleway.scaleway_image_info: - redirect: community.general.scaleway_image_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.scaleway_image_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- cloud.scaleway.scaleway_ip: - redirect: community.general.scaleway_ip - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.scaleway_ip - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. scaleway_ip_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.scaleway_ip_info instead. - cloud.scaleway.scaleway_ip_info: - redirect: community.general.scaleway_ip_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.scaleway_ip_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.scaleway.scaleway_lb: - redirect: community.general.scaleway_lb - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.scaleway_lb - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. scaleway_organization_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.scaleway_organization_info instead. - cloud.scaleway.scaleway_organization_info: - redirect: community.general.scaleway_organization_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.scaleway_organization_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.scaleway.scaleway_private_network: - redirect: community.general.scaleway_private_network - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.scaleway_private_network - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- cloud.scaleway.scaleway_security_group: - redirect: community.general.scaleway_security_group - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.scaleway_security_group - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. scaleway_security_group_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.scaleway_security_group_info instead. - cloud.scaleway.scaleway_security_group_info: - redirect: community.general.scaleway_security_group_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.scaleway_security_group_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.scaleway.scaleway_security_group_rule: - redirect: community.general.scaleway_security_group_rule - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.scaleway_security_group_rule - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. scaleway_server_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.scaleway_server_info instead. - cloud.scaleway.scaleway_server_info: - redirect: community.general.scaleway_server_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.scaleway_server_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. scaleway_snapshot_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.scaleway_snapshot_info instead. 
- cloud.scaleway.scaleway_snapshot_info: - redirect: community.general.scaleway_snapshot_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.scaleway_snapshot_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.scaleway.scaleway_sshkey: - redirect: community.general.scaleway_sshkey - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.scaleway_sshkey - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.scaleway.scaleway_user_data: - redirect: community.general.scaleway_user_data - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.scaleway_user_data - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.scaleway.scaleway_volume: - redirect: community.general.scaleway_volume - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.scaleway_volume - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. scaleway_volume_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.scaleway_volume_info instead. - cloud.scaleway.scaleway_volume_info: - redirect: community.general.scaleway_volume_info + sensu_check: deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.scaleway_volume_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.sefcontext: - redirect: community.general.sefcontext + removal_version: 13.0.0 + warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. 
+ sensu_client: deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.sefcontext - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.selinux_permissive: - redirect: community.general.selinux_permissive + removal_version: 13.0.0 + warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. + sensu_handler: deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.selinux_permissive - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.selogin: - redirect: community.general.selogin + removal_version: 13.0.0 + warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. + sensu_silence: deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.selogin - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - notification.sendgrid: - redirect: community.general.sendgrid + removal_version: 13.0.0 + warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. + sensu_subscription: deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.sendgrid - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.sensu.sensu_check: - redirect: community.general.sensu_check - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.sensu_check - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- monitoring.sensu.sensu_client: - redirect: community.general.sensu_client - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.sensu_client - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.sensu.sensu_handler: - redirect: community.general.sensu_handler - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.sensu_handler - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.sensu.sensu_silence: - redirect: community.general.sensu_silence - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.sensu_silence - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.sensu.sensu_subscription: - redirect: community.general.sensu_subscription - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.sensu_subscription - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.seport: - redirect: community.general.seport - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.seport - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.misc.serverless: - redirect: community.general.serverless - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.serverless - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. + removal_version: 13.0.0 + warning_text: Sensu Core and Sensu Enterprise products have been End of Life since 2019/20. 
sf_account_manager: tombstone: removal_version: 2.0.0 @@ -3829,669 +992,53 @@ plugin_routing: tombstone: removal_version: 2.0.0 warning_text: Use netapp.elementsw.na_elementsw_volume instead. - system.shutdown: - redirect: community.general.shutdown - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.shutdown - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.softlayer.sl_vm: - redirect: community.general.sl_vm - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.sl_vm - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - notification.slack: - redirect: community.general.slack - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.slack - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.slackpkg: - redirect: community.general.slackpkg - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.slackpkg - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. smartos_image_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.smartos_image_info instead. - cloud.smartos.smartos_image_info: - redirect: community.general.smartos_image_info - deprecation: + stackdriver: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.smartos_image_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- packaging.os.snap: - redirect: community.general.snap + warning_text: This module relied on HTTPS APIs that do not exist anymore, + and any new development in the direction of providing an alternative should + happen in the context of the google.cloud collection. + typetalk: deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.snap - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.snap_alias: - redirect: community.general.snap_alias - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.snap_alias - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - net_tools.snmp_facts: - redirect: community.general.snmp_facts - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.snmp_facts - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.solaris_zone: - redirect: community.general.solaris_zone - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.solaris_zone - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.sorcery: - redirect: community.general.sorcery - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.sorcery - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.spectrum_device: - redirect: community.general.spectrum_device - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.spectrum_device - modules. 
This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.spectrum_model_attrs: - redirect: community.general.spectrum_model_attrs - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.spectrum_model_attrs - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.spotinst.spotinst_aws_elastigroup: - redirect: community.general.spotinst_aws_elastigroup - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.spotinst_aws_elastigroup - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - storage.hpe3par.ss_3par_cpg: - redirect: community.general.ss_3par_cpg - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ss_3par_cpg - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.ssh_config: - redirect: community.general.ssh_config - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ssh_config - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.stackdriver: - redirect: community.general.stackdriver - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.stackdriver - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.stacki.stacki_host: - redirect: community.general.stacki_host - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.stacki_host - modules. 
This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.statsd: - redirect: community.general.statsd - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.statsd - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.statusio_maintenance: - redirect: community.general.statusio_maintenance - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.statusio_maintenance - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.sudoers: - redirect: community.general.sudoers - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.sudoers - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.supervisorctl: - redirect: community.general.supervisorctl - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.supervisorctl - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.svc: - redirect: community.general.svc - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.svc - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.svr4pkg: - redirect: community.general.svr4pkg - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.svr4pkg - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- packaging.os.swdepot: - redirect: community.general.swdepot - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.swdepot - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.swupd: - redirect: community.general.swupd - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.swupd - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - notification.syslogger: - redirect: community.general.syslogger - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.syslogger - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.syspatch: - redirect: community.general.syspatch - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.syspatch - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.sysrc: - redirect: community.general.sysrc - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.sysrc - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.sysupgrade: - redirect: community.general.sysupgrade - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.sysupgrade - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.taiga_issue: - redirect: community.general.taiga_issue - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.taiga_issue - modules. 
This has never been supported or documented, and will stop working - in community.general 9.0.0. - notification.telegram: - redirect: community.general.telegram - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.telegram - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.misc.terraform: - redirect: community.general.terraform - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.terraform - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.timezone: - redirect: community.general.timezone - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.timezone - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - notification.twilio: - redirect: community.general.twilio - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.twilio - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - notification.typetalk: - redirect: community.general.typetalk - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.typetalk - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.univention.udm_dns_record: - redirect: community.general.udm_dns_record - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.udm_dns_record - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- cloud.univention.udm_dns_zone: - redirect: community.general.udm_dns_zone - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.udm_dns_zone - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.univention.udm_group: - redirect: community.general.udm_group - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.udm_group - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.univention.udm_share: - redirect: community.general.udm_share - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.udm_share - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.univention.udm_user: - redirect: community.general.udm_user - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.udm_user - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.ufw: - redirect: community.general.ufw - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.ufw - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - monitoring.uptimerobot: - redirect: community.general.uptimerobot - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.uptimerobot - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- packaging.os.urpmi: - redirect: community.general.urpmi - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.urpmi - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.sophos_utm.utm_aaa_group: - redirect: community.general.utm_aaa_group - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.utm_aaa_group - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.sophos_utm.utm_aaa_group_info: - redirect: community.general.utm_aaa_group_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.utm_aaa_group_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.sophos_utm.utm_ca_host_key_cert: - redirect: community.general.utm_ca_host_key_cert - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.utm_ca_host_key_cert - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.sophos_utm.utm_ca_host_key_cert_info: - redirect: community.general.utm_ca_host_key_cert_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.utm_ca_host_key_cert_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.sophos_utm.utm_dns_host: - redirect: community.general.utm_dns_host - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.utm_dns_host - modules. 
This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.sophos_utm.utm_network_interface_address: - redirect: community.general.utm_network_interface_address - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.utm_network_interface_address - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.sophos_utm.utm_network_interface_address_info: - redirect: community.general.utm_network_interface_address_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.utm_network_interface_address_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.sophos_utm.utm_proxy_auth_profile: - redirect: community.general.utm_proxy_auth_profile - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.utm_proxy_auth_profile - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.sophos_utm.utm_proxy_exception: - redirect: community.general.utm_proxy_exception - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.utm_proxy_exception - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.sophos_utm.utm_proxy_frontend: - redirect: community.general.utm_proxy_frontend - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.utm_proxy_frontend - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- web_infrastructure.sophos_utm.utm_proxy_frontend_info: - redirect: community.general.utm_proxy_frontend_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.utm_proxy_frontend_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.sophos_utm.utm_proxy_location: - redirect: community.general.utm_proxy_location - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.utm_proxy_location - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - web_infrastructure.sophos_utm.utm_proxy_location_info: - redirect: community.general.utm_proxy_location_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.utm_proxy_location_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.vdo: - redirect: community.general.vdo - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.vdo - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - database.vertica.vertica_configuration: - redirect: community.general.vertica_configuration - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.vertica_configuration - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. + removal_version: 13.0.0 + warning_text: The typetalk service will be discontinued on Dec 2025. vertica_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.vertica_info instead. 
- database.vertica.vertica_info: - redirect: community.general.vertica_info - deprecation: + webfaction_app: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.vertica_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - database.vertica.vertica_role: - redirect: community.general.vertica_role - deprecation: + warning_text: This module relied on HTTPS APIs that do not exist anymore and + there is no clear path to update. + webfaction_db: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.vertica_role - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - database.vertica.vertica_schema: - redirect: community.general.vertica_schema - deprecation: + warning_text: This module relied on HTTPS APIs that do not exist anymore and + there is no clear path to update. + webfaction_domain: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.vertica_schema - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - database.vertica.vertica_user: - redirect: community.general.vertica_user - deprecation: + warning_text: This module relied on HTTPS APIs that do not exist anymore and + there is no clear path to update. + webfaction_mailbox: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.vertica_user - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - storage.vexata.vexata_eg: - redirect: community.general.vexata_eg - deprecation: + warning_text: This module relied on HTTPS APIs that do not exist anymore and + there is no clear path to update. 
+ webfaction_site: + tombstone: removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.vexata_eg - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - storage.vexata.vexata_volume: - redirect: community.general.vexata_volume - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.vexata_volume - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.smartos.vmadm: - redirect: community.general.vmadm - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.vmadm - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.wakeonlan: - redirect: community.general.wakeonlan - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.wakeonlan - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.redfish.wdc_redfish_command: - redirect: community.general.wdc_redfish_command - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.wdc_redfish_command - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.redfish.wdc_redfish_info: - redirect: community.general.wdc_redfish_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.wdc_redfish_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- cloud.webfaction.webfaction_app: - redirect: community.general.webfaction_app - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.webfaction_app - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.webfaction.webfaction_db: - redirect: community.general.webfaction_db - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.webfaction_db - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.webfaction.webfaction_domain: - redirect: community.general.webfaction_domain - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.webfaction_domain - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.webfaction.webfaction_mailbox: - redirect: community.general.webfaction_mailbox - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.webfaction_mailbox - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.webfaction.webfaction_site: - redirect: community.general.webfaction_site - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.webfaction_site - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - files.xattr: - redirect: community.general.xattr - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.xattr - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- packaging.os.xbps: - redirect: community.general.xbps - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.xbps - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - remote_management.lenovoxcc.xcc_redfish_command: - redirect: community.general.xcc_redfish_command - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.xcc_redfish_command - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.misc.xenserver_facts: - redirect: community.general.xenserver_facts - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.xenserver_facts - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - cloud.xenserver.xenserver_guest: - redirect: community.general.xenserver_guest - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.xenserver_guest - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. + warning_text: This module relied on HTTPS APIs that do not exist anymore and + there is no clear path to update. xenserver_guest_facts: tombstone: removal_version: 3.0.0 warning_text: Use community.general.xenserver_guest_info instead. - cloud.xenserver.xenserver_guest_info: - redirect: community.general.xenserver_guest_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.xenserver_guest_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- cloud.xenserver.xenserver_guest_powerstate: - redirect: community.general.xenserver_guest_powerstate - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.xenserver_guest_powerstate - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.xfconf: - redirect: community.general.xfconf - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.xfconf - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.xfconf_info: - redirect: community.general.xfconf_info - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.xfconf_info - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.xfs_quota: - redirect: community.general.xfs_quota - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.xfs_quota - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - files.xml: - redirect: community.general.xml - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.xml - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.language.yarn: - redirect: community.general.yarn - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.yarn - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- packaging.os.yum_versionlock: - redirect: community.general.yum_versionlock - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.yum_versionlock - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - storage.zfs.zfs: - redirect: community.general.zfs - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.zfs - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - storage.zfs.zfs_delegate_admin: - redirect: community.general.zfs_delegate_admin - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.zfs_delegate_admin - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - storage.zfs.zfs_facts: - redirect: community.general.zfs_facts - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.zfs_facts - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - clustering.znode: - redirect: community.general.znode - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.znode - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - storage.zfs.zpool_facts: - redirect: community.general.zpool_facts - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.zpool_facts - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
- packaging.os.zypper: - redirect: community.general.zypper - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.zypper - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. - packaging.os.zypper_repository: - redirect: community.general.zypper_repository - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.zypper_repository - modules. This has never been supported or documented, and will stop working - in community.general 9.0.0. doc_fragments: _gcp: redirect: community.google._gcp @@ -4505,8 +1052,46 @@ plugin_routing: redirect: community.kubevirt.kubevirt_vm_options nios: redirect: infoblox.nios_modules.nios + oracle: + deprecation: + removal_version: 13.0.0 + warning_text: Code is unmaintained here and official Oracle collection is available for a number of years. + oracle_creatable_resource: + deprecation: + removal_version: 13.0.0 + warning_text: Code is unmaintained here and official Oracle collection is available for a number of years. + oracle_display_name_option: + deprecation: + removal_version: 13.0.0 + warning_text: Code is unmaintained here and official Oracle collection is available for a number of years. + oracle_name_option: + deprecation: + removal_version: 13.0.0 + warning_text: Code is unmaintained here and official Oracle collection is available for a number of years. + oracle_tags: + deprecation: + removal_version: 13.0.0 + warning_text: Code is unmaintained here and official Oracle collection is available for a number of years. + oracle_wait_options: + deprecation: + removal_version: 13.0.0 + warning_text: Code is unmaintained here and official Oracle collection is available for a number of years. 
postgresql: redirect: community.postgresql.postgresql + proxmox: + redirect: community.proxmox.proxmox + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + purestorage: + tombstone: + removal_version: 12.0.0 + warning_text: The modules for purestorage were removed in community.general 3.0.0, this document fragment was left behind. + rackspace: + tombstone: + removal_version: 9.0.0 + warning_text: This doc fragment was used by rax modules, that relied on the deprecated + package pyrax. module_utils: docker.common: redirect: community.docker.common @@ -4524,37 +1109,45 @@ plugin_routing: redirect: community.kubevirt.kubevirt net_tools.nios.api: redirect: infoblox.nios_modules.api + oci_utils: + deprecation: + removal_version: 13.0.0 + warning_text: Code is unmaintained here and official Oracle collection is available for a number of years. postgresql: redirect: community.postgresql.postgresql + proxmox: + redirect: community.proxmox.proxmox + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. + pure: + tombstone: + removal_version: 12.0.0 + warning_text: The modules for purestorage were removed in community.general 3.0.0, this module util was left behind. + rax: + tombstone: + removal_version: 9.0.0 + warning_text: This module util relied on the deprecated package pyrax. remote_management.dellemc.dellemc_idrac: redirect: dellemc.openmanage.dellemc_idrac remote_management.dellemc.ome: redirect: dellemc.openmanage.ome - callback: - actionable: - tombstone: - removal_version: 2.0.0 - warning_text: Use the 'default' callback plugin with 'display_skipped_hosts - = no' and 'display_ok_hosts = no' options. - full_skip: - tombstone: - removal_version: 2.0.0 - warning_text: Use the 'default' callback plugin with 'display_skipped_hosts - = no' option. 
- osx_say: - redirect: community.general.say - stderr: - tombstone: - removal_version: 2.0.0 - warning_text: Use the 'default' callback plugin with 'display_failed_stderr - = yes' option. inventory: docker_machine: redirect: community.docker.docker_machine docker_swarm: redirect: community.docker.docker_swarm + proxmox: + redirect: community.proxmox.proxmox + deprecation: + removal_version: 15.0.0 + warning_text: The proxmox content has been moved to community.proxmox. kubevirt: redirect: community.kubevirt.kubevirt + stackpath_compute: + tombstone: + removal_version: 11.0.0 + warning_text: The company and the service were sunset in June 2024. filter: path_join: # The ansible.builtin.path_join filter has been added in ansible-base 2.10. @@ -4565,18 +1158,3 @@ plugin_routing: # for Ansible 2.9 or earlier. Now we only will have the redirect until we # eventually will deprecate and then remove it. redirect: ansible.builtin.path_join - action: - system.iptables_state: - redirect: community.general.iptables_state - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.iptables_state - action. This has never been supported or documented, and will stop working - in community.general 9.0.0. - system.shutdown: - redirect: community.general.shutdown - deprecation: - removal_version: 9.0.0 - warning_text: You are using an internal name to access the community.general.shutdown - action. This has never been supported or documented, and will stop working - in community.general 9.0.0. 
diff --git a/noxfile.py b/noxfile.py new file mode 100644 index 0000000000..9b2f92a9e1 --- /dev/null +++ b/noxfile.py @@ -0,0 +1,38 @@ +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +# SPDX-FileCopyrightText: 2025 Felix Fontein + +# /// script +# dependencies = ["nox>=2025.02.09", "antsibull-nox"] +# /// + +import sys + +import nox + + +try: + import antsibull_nox +except ImportError: + print("You need to install antsibull-nox in the same Python environment as nox.") + sys.exit(1) + + +antsibull_nox.load_antsibull_nox_toml() + + +@nox.session(name="aliases", python=False, default=True) +def aliases(session: nox.Session) -> None: + session.run("python", "tests/sanity/extra/aliases.py") + + +@nox.session(name="botmeta", default=True) +def botmeta(session: nox.Session) -> None: + session.install("PyYAML", "voluptuous") + session.run("python", "tests/sanity/extra/botmeta.py") + + +# Allow to run the noxfile with `python noxfile.py`, `pipx run noxfile.py`, or similar. 
+# Requires nox >= 2025.02.09 +if __name__ == "__main__": + nox.main() diff --git a/plugins/action/iptables_state.py b/plugins/action/iptables_state.py index f59a7298b6..dd6724476f 100644 --- a/plugins/action/iptables_state.py +++ b/plugins/action/iptables_state.py @@ -1,10 +1,8 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2020, quidame # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import time @@ -22,29 +20,37 @@ class ActionModule(ActionBase): _VALID_ARGS = frozenset(('path', 'state', 'table', 'noflush', 'counters', 'modprobe', 'ip_version', 'wait')) DEFAULT_SUDOABLE = True - MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO = ( - "This module doesn't support async>0 and poll>0 when its 'state' param " - "is set to 'restored'. To enable its rollback feature (that needs the " - "module to run asynchronously on the remote), please set task attribute " - "'poll' (=%s) to 0, and 'async' (=%s) to a value >2 and not greater than " - "'ansible_timeout' (=%s) (recommended).") - MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK = ( - "Attempts to restore iptables state without rollback in case of mistake " - "may lead the ansible controller to loose access to the hosts and never " - "regain it before fixing firewall rules through a serial console, or any " - "other way except SSH. Please set task attribute 'poll' (=%s) to 0, and " - "'async' (=%s) to a value >2 and not greater than 'ansible_timeout' (=%s) " - "(recommended).") - MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT = ( - "You attempt to restore iptables state with rollback in case of mistake, " - "but with settings that will lead this rollback to happen AFTER that the " - "controller will reach its own timeout. 
Please set task attribute 'poll' " - "(=%s) to 0, and 'async' (=%s) to a value >2 and not greater than " - "'ansible_timeout' (=%s) (recommended).") + @staticmethod + def msg_error__async_and_poll_not_zero(task_poll, task_async, max_timeout): + return ( + "This module doesn't support async>0 and poll>0 when its 'state' param " + "is set to 'restored'. To enable its rollback feature (that needs the " + "module to run asynchronously on the remote), please set task attribute " + f"'poll' (={task_poll}) to 0, and 'async' (={task_async}) to a value >2 and not greater than " + f"'ansible_timeout' (={max_timeout}) (recommended).") + + @staticmethod + def msg_warning__no_async_is_no_rollback(task_poll, task_async, max_timeout): + return ( + "Attempts to restore iptables state without rollback in case of mistake " + "may lead the ansible controller to loose access to the hosts and never " + "regain it before fixing firewall rules through a serial console, or any " + f"other way except SSH. Please set task attribute 'poll' (={task_poll}) to 0, and " + f"'async' (={task_async}) to a value >2 and not greater than 'ansible_timeout' (={max_timeout}) " + "(recommended).") + + @staticmethod + def msg_warning__async_greater_than_timeout(task_poll, task_async, max_timeout): + return ( + "You attempt to restore iptables state with rollback in case of mistake, " + "but with settings that will lead this rollback to happen AFTER that the " + "controller will reach its own timeout. Please set task attribute 'poll' " + f"(={task_poll}) to 0, and 'async' (={task_async}) to a value >2 and not greater than " + f"'ansible_timeout' (={max_timeout}) (recommended).") def _async_result(self, async_status_args, task_vars, timeout): ''' - Retrieve results of the asynchonous task, and display them in place of + Retrieve results of the asynchronous task, and display them in place of the async wrapper results (those with the ansible_job_id key). 
''' async_status = self._task.copy() @@ -88,21 +94,25 @@ class ActionModule(ActionBase): max_timeout = self._connection._play_context.timeout module_args = self._task.args + async_status_args = {} + starter_cmd = None + confirm_cmd = None + if module_args.get('state', None) == 'restored': if not wrap_async: if not check_mode: - display.warning(self.MSG_WARNING__NO_ASYNC_IS_NO_ROLLBACK % ( + display.warning(self.msg_warning__no_async_is_no_rollback( task_poll, task_async, max_timeout)) elif task_poll: - raise AnsibleActionFail(self.MSG_ERROR__ASYNC_AND_POLL_NOT_ZERO % ( + raise AnsibleActionFail(self.msg_error__async_and_poll_not_zero( task_poll, task_async, max_timeout)) else: if task_async > max_timeout and not check_mode: - display.warning(self.MSG_WARNING__ASYNC_GREATER_THAN_TIMEOUT % ( + display.warning(self.msg_warning__async_greater_than_timeout( task_poll, task_async, max_timeout)) @@ -115,10 +125,10 @@ class ActionModule(ActionBase): # remote and local sides (if not the same, make the loop # longer on the controller); and set a backup file path. module_args['_timeout'] = task_async - module_args['_back'] = '%s/iptables.state' % async_dir + module_args['_back'] = f'{async_dir}/iptables.state' async_status_args = dict(mode='status') - confirm_cmd = 'rm -f %s' % module_args['_back'] - starter_cmd = 'touch %s.starter' % module_args['_back'] + confirm_cmd = f"rm -f {module_args['_back']}" + starter_cmd = f"touch {module_args['_back']}.starter" remaining_time = max(task_async, max_timeout) # do work! 
diff --git a/plugins/action/shutdown.py b/plugins/action/shutdown.py index c2860f1d6f..d2a9d3c2b7 100644 --- a/plugins/action/shutdown.py +++ b/plugins/action/shutdown.py @@ -1,12 +1,11 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2020, Amin Vakil # Copyright (c) 2016-2018, Matt Davis # Copyright (c) 2018, Sam Doran # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations + from ansible.errors import AnsibleError, AnsibleConnectionFailure from ansible.module_utils.common.text.converters import to_native, to_text @@ -17,6 +16,10 @@ from ansible.utils.display import Display display = Display() +def fmt(mapping, key): + return to_native(mapping[key]).strip() + + class TimedOutException(Exception): pass @@ -44,7 +47,7 @@ class ActionModule(ActionBase): SHUTDOWN_COMMAND_ARGS = { 'alpine': '', 'void': '-h +{delay_min} "{message}"', - 'freebsd': '-h +{delay_sec}s "{message}"', + 'freebsd': '-p +{delay_sec}s "{message}"', 'linux': DEFAULT_SHUTDOWN_COMMAND_ARGS, 'macosx': '-h +{delay_min} "{message}"', 'openbsd': '-h +{delay_min} "{message}"', @@ -80,35 +83,41 @@ class ActionModule(ActionBase): getattr(self, default_value)))) return value - def get_shutdown_command_args(self, distribution): - args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS') - # Convert seconds to minutes. If less that 60, set it to 0. 
- delay_sec = self.delay - shutdown_message = self._task.args.get('msg', self.DEFAULT_SHUTDOWN_MESSAGE) - return args.format(delay_sec=delay_sec, delay_min=delay_sec // 60, message=shutdown_message) - def get_distribution(self, task_vars): # FIXME: only execute the module if we don't already have the facts we need distribution = {} - display.debug('{action}: running setup module to get distribution'.format(action=self._task.action)) + display.debug(f'{self._task.action}: running setup module to get distribution') module_output = self._execute_module( task_vars=task_vars, module_name='ansible.legacy.setup', module_args={'gather_subset': 'min'}) try: if module_output.get('failed', False): - raise AnsibleError('Failed to determine system distribution. {0}, {1}'.format( - to_native(module_output['module_stdout']).strip(), - to_native(module_output['module_stderr']).strip())) + raise AnsibleError(f"Failed to determine system distribution. {fmt(module_output, 'module_stdout')}, {fmt(module_output, 'module_stderr')}") distribution['name'] = module_output['ansible_facts']['ansible_distribution'].lower() - distribution['version'] = to_text(module_output['ansible_facts']['ansible_distribution_version'].split('.')[0]) + distribution['version'] = to_text( + module_output['ansible_facts']['ansible_distribution_version'].split('.')[0]) distribution['family'] = to_text(module_output['ansible_facts']['ansible_os_family'].lower()) - display.debug("{action}: distribution: {dist}".format(action=self._task.action, dist=distribution)) + display.debug(f"{self._task.action}: distribution: {distribution}") return distribution except KeyError as ke: - raise AnsibleError('Failed to get distribution information. Missing "{0}" in output.'.format(ke.args[0])) + raise AnsibleError(f'Failed to get distribution information. 
Missing "{ke.args[0]}" in output.') def get_shutdown_command(self, task_vars, distribution): + def find_command(command, find_search_paths): + display.debug(f'{self._task.action}: running find module looking in {find_search_paths} to get path for "{command}"') + find_result = self._execute_module( + task_vars=task_vars, + # prevent collection search by calling with ansible.legacy (still allows library/ override of find) + module_name='ansible.legacy.find', + module_args={ + 'paths': find_search_paths, + 'patterns': [command], + 'file_type': 'any' + } + ) + return [x['path'] for x in find_result['files']] + shutdown_bin = self._get_value_from_facts('SHUTDOWN_COMMANDS', distribution, 'DEFAULT_SHUTDOWN_COMMAND') default_search_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin'] search_paths = self._task.args.get('search_paths', default_search_paths) @@ -118,62 +127,61 @@ class ActionModule(ActionBase): if is_string(search_paths): search_paths = [search_paths] - # Error if we didn't get a list - err_msg = "'search_paths' must be a string or flat list of strings, got {0}" try: incorrect_type = any(not is_string(x) for x in search_paths) if not isinstance(search_paths, list) or incorrect_type: raise TypeError except TypeError: - raise AnsibleError(err_msg.format(search_paths)) + # Error if we didn't get a list + err_msg = f"'search_paths' must be a string or flat list of strings, got {search_paths}" + raise AnsibleError(err_msg) - display.debug('{action}: running find module looking in {paths} to get path for "{command}"'.format( - action=self._task.action, - command=shutdown_bin, - paths=search_paths)) - find_result = self._execute_module( - task_vars=task_vars, - # prevent collection search by calling with ansible.legacy (still allows library/ override of find) - module_name='ansible.legacy.find', - module_args={ - 'paths': search_paths, - 'patterns': [shutdown_bin], - 'file_type': 'any' - } - ) + full_path = find_command(shutdown_bin, search_paths) # find the path to 
the shutdown command + if not full_path: # if we could not find the shutdown command - full_path = [x['path'] for x in find_result['files']] - if not full_path: - raise AnsibleError('Unable to find command "{0}" in search paths: {1}'.format(shutdown_bin, search_paths)) - self._shutdown_command = full_path[0] - return self._shutdown_command + # tell the user we will try with systemd + display.vvv(f'Unable to find command "{shutdown_bin}" in search paths: {search_paths}, will attempt a shutdown using systemd directly.') + systemctl_search_paths = ['/bin', '/usr/bin'] + full_path = find_command('systemctl', systemctl_search_paths) # find the path to the systemctl command + if not full_path: # if we couldn't find systemctl + raise AnsibleError( + f'Could not find command "{shutdown_bin}" in search paths: {search_paths} or systemctl' + f' command in search paths: {systemctl_search_paths}, unable to shutdown.') # we give up here + else: + return f"{full_path[0]} poweroff" # done, since we cannot use args with systemd shutdown + + # systemd case taken care of, here we add args to the command + args = self._get_value_from_facts('SHUTDOWN_COMMAND_ARGS', distribution, 'DEFAULT_SHUTDOWN_COMMAND_ARGS') + # Convert seconds to minutes. If less that 60, set it to 0. 
+ delay_sec = self.delay + shutdown_message = self._task.args.get('msg', self.DEFAULT_SHUTDOWN_MESSAGE) + + af = args.format(delay_sec=delay_sec, delay_min=delay_sec // 60, message=shutdown_message) + return f'{full_path[0]} {af}' def perform_shutdown(self, task_vars, distribution): result = {} shutdown_result = {} - shutdown_command = self.get_shutdown_command(task_vars, distribution) - shutdown_command_args = self.get_shutdown_command_args(distribution) - shutdown_command_exec = '{0} {1}'.format(shutdown_command, shutdown_command_args) + shutdown_command_exec = self.get_shutdown_command(task_vars, distribution) self.cleanup(force=True) try: - display.vvv("{action}: shutting down server...".format(action=self._task.action)) - display.debug("{action}: shutting down server with command '{command}'".format(action=self._task.action, command=shutdown_command_exec)) + display.vvv(f"{self._task.action}: shutting down server...") + display.debug(f"{self._task.action}: shutting down server with command '{shutdown_command_exec}'") if self._play_context.check_mode: shutdown_result['rc'] = 0 else: shutdown_result = self._low_level_execute_command(shutdown_command_exec, sudoable=self.DEFAULT_SUDOABLE) except AnsibleConnectionFailure as e: # If the connection is closed too quickly due to the system being shutdown, carry on - display.debug('{action}: AnsibleConnectionFailure caught and handled: {error}'.format(action=self._task.action, error=to_text(e))) + display.debug( + f'{self._task.action}: AnsibleConnectionFailure caught and handled: {e}') shutdown_result['rc'] = 0 if shutdown_result['rc'] != 0: result['failed'] = True result['shutdown'] = False - result['msg'] = "Shutdown command failed. Error was {stdout}, {stderr}".format( - stdout=to_native(shutdown_result['stdout'].strip()), - stderr=to_native(shutdown_result['stderr'].strip())) + result['msg'] = f"Shutdown command failed. 
Error was {fmt(shutdown_result, 'stdout')}, {fmt(shutdown_result, 'stderr')}" return result result['failed'] = False @@ -186,7 +194,7 @@ class ActionModule(ActionBase): # If running with local connection, fail so we don't shutdown ourself if self._connection.transport == 'local' and (not self._play_context.check_mode): - msg = 'Running {0} with local connection would shutdown the control node.'.format(self._task.action) + msg = f'Running {self._task.action} with local connection would shutdown the control node.' return {'changed': False, 'elapsed': 0, 'shutdown': False, 'failed': True, 'msg': msg} if task_vars is None: diff --git a/plugins/become/doas.py b/plugins/become/doas.py index 69e730aad4..84efe31ac4 100644 --- a/plugins/become/doas.py +++ b/plugins/become/doas.py @@ -1,84 +1,91 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: doas - short_description: Do As user +DOCUMENTATION = r""" +name: doas +short_description: Do As user +description: + - This become plugins allows your remote/login user to execute commands as another user using the C(doas) utility. +author: Ansible Core Team +options: + become_user: + description: User you 'become' to execute the task. + type: string + ini: + - section: privilege_escalation + key: become_user + - section: doas_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_doas_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_DOAS_USER + become_exe: + description: C(doas) executable. 
+ type: string + default: doas + ini: + - section: privilege_escalation + key: become_exe + - section: doas_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_doas_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_DOAS_EXE + become_flags: + description: Options to pass to C(doas). + type: string + default: '' + ini: + - section: privilege_escalation + key: become_flags + - section: doas_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_doas_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_DOAS_FLAGS + become_pass: + description: Password for C(doas) prompt. + type: string + required: false + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_doas_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_DOAS_PASS + ini: + - section: doas_become_plugin + key: password + prompt_l10n: description: - - This become plugins allows your remote/login user to execute commands as another user via the doas utility. 
- author: Ansible Core Team - options: - become_user: - description: User you 'become' to execute the task - ini: - - section: privilege_escalation - key: become_user - - section: doas_become_plugin - key: user - vars: - - name: ansible_become_user - - name: ansible_doas_user - env: - - name: ANSIBLE_BECOME_USER - - name: ANSIBLE_DOAS_USER - become_exe: - description: Doas executable - default: doas - ini: - - section: privilege_escalation - key: become_exe - - section: doas_become_plugin - key: executable - vars: - - name: ansible_become_exe - - name: ansible_doas_exe - env: - - name: ANSIBLE_BECOME_EXE - - name: ANSIBLE_DOAS_EXE - become_flags: - description: Options to pass to doas - default: '' - ini: - - section: privilege_escalation - key: become_flags - - section: doas_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_doas_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_DOAS_FLAGS - become_pass: - description: password for doas prompt - required: false - vars: - - name: ansible_become_password - - name: ansible_become_pass - - name: ansible_doas_pass - env: - - name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_DOAS_PASS - ini: - - section: doas_become_plugin - key: password - prompt_l10n: - description: - - List of localized strings to match for prompt detection - - If empty we'll use the built in one - default: [] - ini: - - section: doas_become_plugin - key: localized_prompts - vars: - - name: ansible_doas_prompt_l10n - env: - - name: ANSIBLE_DOAS_PROMPT_L10N -''' + - List of localized strings to match for prompt detection. + - If empty the plugin uses the built-in one. + type: list + elements: string + default: [] + ini: + - section: doas_become_plugin + key: localized_prompts + vars: + - name: ansible_doas_prompt_l10n + env: + - name: ANSIBLE_DOAS_PROMPT_L10N +notes: + - This become plugin does not work when connection pipelining is enabled. With ansible-core 2.19+, using it automatically + disables pipelining. 
On ansible-core 2.18 and before, pipelining must explicitly be disabled by the user. +""" import re @@ -94,6 +101,10 @@ class BecomeModule(BecomeBase): fail = ('Permission denied',) missing = ('Authorization required',) + # See https://github.com/ansible-collections/community.general/issues/9977, + # https://github.com/ansible/ansible/pull/78111 + pipelining = False + def check_password_prompt(self, b_output): ''' checks if the expected password prompt exists in b_output ''' @@ -119,9 +130,9 @@ class BecomeModule(BecomeBase): flags += ' -n' become_user = self.get_option('become_user') - user = '-u %s' % (become_user) if become_user else '' + user = f'-u {become_user}' if become_user else '' success_cmd = self._build_success_command(cmd, shell, noexe=True) executable = getattr(shell, 'executable', shell.SHELL_FAMILY) - return '%s %s %s %s -c %s' % (become_exe, flags, user, executable, success_cmd) + return f'{become_exe} {flags} {user} {executable} -c {success_cmd}' diff --git a/plugins/become/dzdo.py b/plugins/become/dzdo.py index a358e84e39..dad05eb34e 100644 --- a/plugins/become/dzdo.py +++ b/plugins/become/dzdo.py @@ -1,72 +1,74 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: dzdo - short_description: Centrify's Direct Authorize - description: - - This become plugins allows your remote/login user to execute commands as another user via the dzdo utility. 
- author: Ansible Core Team - options: - become_user: - description: User you 'become' to execute the task - ini: - - section: privilege_escalation - key: become_user - - section: dzdo_become_plugin - key: user - vars: - - name: ansible_become_user - - name: ansible_dzdo_user - env: - - name: ANSIBLE_BECOME_USER - - name: ANSIBLE_DZDO_USER - become_exe: - description: Dzdo executable - default: dzdo - ini: - - section: privilege_escalation - key: become_exe - - section: dzdo_become_plugin - key: executable - vars: - - name: ansible_become_exe - - name: ansible_dzdo_exe - env: - - name: ANSIBLE_BECOME_EXE - - name: ANSIBLE_DZDO_EXE - become_flags: - description: Options to pass to dzdo - default: -H -S -n - ini: - - section: privilege_escalation - key: become_flags - - section: dzdo_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_dzdo_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_DZDO_FLAGS - become_pass: - description: Options to pass to dzdo - required: false - vars: - - name: ansible_become_password - - name: ansible_become_pass - - name: ansible_dzdo_pass - env: - - name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_DZDO_PASS - ini: - - section: dzdo_become_plugin - key: password -''' +DOCUMENTATION = r""" +name: dzdo +short_description: Centrify's Direct Authorize +description: + - This become plugins allows your remote/login user to execute commands as another user using the C(dzdo) utility. +author: Ansible Core Team +options: + become_user: + description: User you 'become' to execute the task. + type: string + ini: + - section: privilege_escalation + key: become_user + - section: dzdo_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_dzdo_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_DZDO_USER + become_exe: + description: C(dzdo) executable. 
+ type: string + default: dzdo + ini: + - section: privilege_escalation + key: become_exe + - section: dzdo_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_dzdo_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_DZDO_EXE + become_flags: + description: Options to pass to C(dzdo). + type: string + default: -H -S -n + ini: + - section: privilege_escalation + key: become_flags + - section: dzdo_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_dzdo_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_DZDO_FLAGS + become_pass: + description: Options to pass to C(dzdo). + type: string + required: false + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_dzdo_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_DZDO_PASS + ini: + - section: dzdo_become_plugin + key: password +""" from ansible.plugins.become import BecomeBase @@ -88,10 +90,10 @@ class BecomeModule(BecomeBase): flags = self.get_option('become_flags') if self.get_option('become_pass'): - self.prompt = '[dzdo via ansible, key=%s] password:' % self._id - flags = '%s -p "%s"' % (flags.replace('-n', ''), self.prompt) + self.prompt = f'[dzdo via ansible, key={self._id}] password:' + flags = f"{flags.replace('-n', '')} -p \"{self.prompt}\"" become_user = self.get_option('become_user') - user = '-u %s' % (become_user) if become_user else '' + user = f'-u {become_user}' if become_user else '' - return ' '.join([becomecmd, flags, user, self._build_success_command(cmd, shell)]) + return f"{becomecmd} {flags} {user} {self._build_success_command(cmd, shell)}" diff --git a/plugins/become/ksu.py b/plugins/become/ksu.py index fa2f66864a..0ffba62385 100644 --- a/plugins/become/ksu.py +++ b/plugins/become/ksu.py @@ -1,85 +1,89 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: ksu - short_description: Kerberos substitute user +DOCUMENTATION = r""" +name: ksu +short_description: Kerberos substitute user +description: + - This become plugins allows your remote/login user to execute commands as another user using the C(ksu) utility. +author: Ansible Core Team +options: + become_user: + description: User you 'become' to execute the task. + type: string + ini: + - section: privilege_escalation + key: become_user + - section: ksu_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_ksu_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_KSU_USER + required: true + become_exe: + description: C(ksu) executable. + type: string + default: ksu + ini: + - section: privilege_escalation + key: become_exe + - section: ksu_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_ksu_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_KSU_EXE + become_flags: + description: Options to pass to C(ksu). + type: string + default: '' + ini: + - section: privilege_escalation + key: become_flags + - section: ksu_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_ksu_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_KSU_FLAGS + become_pass: + description: C(ksu) password. + type: string + required: false + vars: + - name: ansible_ksu_pass + - name: ansible_become_pass + - name: ansible_become_password + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_KSU_PASS + ini: + - section: ksu_become_plugin + key: password + prompt_l10n: description: - - This become plugins allows your remote/login user to execute commands as another user via the ksu utility. 
- author: Ansible Core Team - options: - become_user: - description: User you 'become' to execute the task - ini: - - section: privilege_escalation - key: become_user - - section: ksu_become_plugin - key: user - vars: - - name: ansible_become_user - - name: ansible_ksu_user - env: - - name: ANSIBLE_BECOME_USER - - name: ANSIBLE_KSU_USER - required: true - become_exe: - description: Su executable - default: ksu - ini: - - section: privilege_escalation - key: become_exe - - section: ksu_become_plugin - key: executable - vars: - - name: ansible_become_exe - - name: ansible_ksu_exe - env: - - name: ANSIBLE_BECOME_EXE - - name: ANSIBLE_KSU_EXE - become_flags: - description: Options to pass to ksu - default: '' - ini: - - section: privilege_escalation - key: become_flags - - section: ksu_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_ksu_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_KSU_FLAGS - become_pass: - description: ksu password - required: false - vars: - - name: ansible_ksu_pass - - name: ansible_become_pass - - name: ansible_become_password - env: - - name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_KSU_PASS - ini: - - section: ksu_become_plugin - key: password - prompt_l10n: - description: - - List of localized strings to match for prompt detection - - If empty we'll use the built in one - default: [] - ini: - - section: ksu_become_plugin - key: localized_prompts - vars: - - name: ansible_ksu_prompt_l10n - env: - - name: ANSIBLE_KSU_PROMPT_L10N -''' + - List of localized strings to match for prompt detection. + - If empty the plugin uses the built-in one. 
+ type: list + elements: string + default: [] + ini: + - section: ksu_become_plugin + key: localized_prompts + vars: + - name: ansible_ksu_prompt_l10n + env: + - name: ANSIBLE_KSU_PROMPT_L10N +""" import re @@ -118,4 +122,4 @@ class BecomeModule(BecomeBase): flags = self.get_option('become_flags') user = self.get_option('become_user') - return '%s %s %s -e %s ' % (exe, user, flags, self._build_success_command(cmd, shell)) + return f'{exe} {user} {flags} -e {self._build_success_command(cmd, shell)} ' diff --git a/plugins/become/machinectl.py b/plugins/become/machinectl.py index 461a3f635d..685f39f5d8 100644 --- a/plugins/become/machinectl.py +++ b/plugins/become/machinectl.py @@ -1,95 +1,99 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: machinectl - short_description: Systemd's machinectl privilege escalation - description: - - This become plugins allows your remote/login user to execute commands as another user via the machinectl utility. 
- author: Ansible Core Team - options: - become_user: - description: User you 'become' to execute the task - default: '' - ini: - - section: privilege_escalation - key: become_user - - section: machinectl_become_plugin - key: user - vars: - - name: ansible_become_user - - name: ansible_machinectl_user - env: - - name: ANSIBLE_BECOME_USER - - name: ANSIBLE_MACHINECTL_USER - become_exe: - description: Machinectl executable - default: machinectl - ini: - - section: privilege_escalation - key: become_exe - - section: machinectl_become_plugin - key: executable - vars: - - name: ansible_become_exe - - name: ansible_machinectl_exe - env: - - name: ANSIBLE_BECOME_EXE - - name: ANSIBLE_MACHINECTL_EXE - become_flags: - description: Options to pass to machinectl - default: '' - ini: - - section: privilege_escalation - key: become_flags - - section: machinectl_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_machinectl_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_MACHINECTL_FLAGS - become_pass: - description: Password for machinectl - required: false - vars: - - name: ansible_become_password - - name: ansible_become_pass - - name: ansible_machinectl_pass - env: - - name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_MACHINECTL_PASS - ini: - - section: machinectl_become_plugin - key: password - notes: - - When not using this plugin with user C(root), it only works correctly with a polkit rule which will alter - the behaviour of machinectl. This rule must alter the prompt behaviour to ask directly for the user credentials, - if the user is allowed to perform the action (take a look at the examples section). - If such a rule is not present the plugin only work if it is used in context with the root user, - because then no further prompt will be shown by machinectl. 
-''' +DOCUMENTATION = r""" +name: machinectl +short_description: Systemd's machinectl privilege escalation +description: + - This become plugins allows your remote/login user to execute commands as another user using the C(machinectl) utility. +author: Ansible Core Team +options: + become_user: + description: User you 'become' to execute the task. + type: string + default: '' + ini: + - section: privilege_escalation + key: become_user + - section: machinectl_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_machinectl_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_MACHINECTL_USER + become_exe: + description: C(machinectl) executable. + type: string + default: machinectl + ini: + - section: privilege_escalation + key: become_exe + - section: machinectl_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_machinectl_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_MACHINECTL_EXE + become_flags: + description: Options to pass to C(machinectl). + type: string + default: '' + ini: + - section: privilege_escalation + key: become_flags + - section: machinectl_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_machinectl_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_MACHINECTL_FLAGS + become_pass: + description: Password for C(machinectl). + type: string + required: false + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_machinectl_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_MACHINECTL_PASS + ini: + - section: machinectl_become_plugin + key: password +notes: + - When not using this plugin with user V(root), it only works correctly with a polkit rule which alters the behaviour + of C(machinectl). This rule must alter the prompt behaviour to ask directly for the user credentials, if the user is allowed + to perform the action (take a look at the examples section). 
If such a rule is not present the plugin only works if it + is used in context with the root user, because then no further prompt is shown by C(machinectl). + - This become plugin does not work when connection pipelining is enabled. With ansible-core 2.19+, using it automatically + disables pipelining. On ansible-core 2.18 and before, pipelining must explicitly be disabled by the user. +""" -EXAMPLES = r''' +EXAMPLES = r""" # A polkit rule needed to use the module with a non-root user. # See the Notes section for details. -60-machinectl-fast-user-auth.rules: | - polkit.addRule(function(action, subject) { - if(action.id == "org.freedesktop.machine1.host-shell" && subject.isInGroup("wheel")) { - return polkit.Result.AUTH_SELF_KEEP; - } - }); -''' +/etc/polkit-1/rules.d/60-machinectl-fast-user-auth.rules: |- + polkit.addRule(function(action, subject) { + if(action.id == "org.freedesktop.machine1.host-shell" && + subject.isInGroup("wheel")) { + return polkit.Result.AUTH_SELF_KEEP; + } + }); +""" from re import compile as re_compile from ansible.plugins.become import BecomeBase -from ansible.module_utils._text import to_bytes +from ansible.module_utils.common.text.converters import to_bytes ansi_color_codes = re_compile(to_bytes(r'\x1B\[[0-9;]+m')) @@ -102,6 +106,11 @@ class BecomeModule(BecomeBase): prompt = 'Password: ' fail = ('==== AUTHENTICATION FAILED ====',) success = ('==== AUTHENTICATION COMPLETE ====',) + require_tty = True # see https://github.com/ansible-collections/community.general/issues/6932 + + # See https://github.com/ansible/ansible/issues/81254, + # https://github.com/ansible/ansible/pull/78111 + pipelining = False @staticmethod def remove_ansi_codes(line): @@ -117,7 +126,7 @@ class BecomeModule(BecomeBase): flags = self.get_option('become_flags') user = self.get_option('become_user') - return '%s -q shell %s %s@ %s' % (become, flags, user, self._build_success_command(cmd, shell)) + return f'{become} -q shell {flags} {user}@ 
{self._build_success_command(cmd, shell)}' def check_success(self, b_output): b_output = self.remove_ansi_codes(b_output) diff --git a/plugins/become/pbrun.py b/plugins/become/pbrun.py index 7d1437191e..c9eb975427 100644 --- a/plugins/become/pbrun.py +++ b/plugins/become/pbrun.py @@ -1,84 +1,86 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: pbrun - short_description: PowerBroker run - description: - - This become plugins allows your remote/login user to execute commands as another user via the pbrun utility. - author: Ansible Core Team - options: - become_user: - description: User you 'become' to execute the task - default: '' - ini: - - section: privilege_escalation - key: become_user - - section: pbrun_become_plugin - key: user - vars: - - name: ansible_become_user - - name: ansible_pbrun_user - env: - - name: ANSIBLE_BECOME_USER - - name: ANSIBLE_PBRUN_USER - become_exe: - description: Sudo executable - default: pbrun - ini: - - section: privilege_escalation - key: become_exe - - section: pbrun_become_plugin - key: executable - vars: - - name: ansible_become_exe - - name: ansible_pbrun_exe - env: - - name: ANSIBLE_BECOME_EXE - - name: ANSIBLE_PBRUN_EXE - become_flags: - description: Options to pass to pbrun - default: '' - ini: - - section: privilege_escalation - key: become_flags - - section: pbrun_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_pbrun_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_PBRUN_FLAGS - become_pass: - description: Password for pbrun - required: false - vars: - - name: ansible_become_password - - name: ansible_become_pass - - name: ansible_pbrun_pass - env: - - 
name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_PBRUN_PASS - ini: - - section: pbrun_become_plugin - key: password - wrap_exe: - description: Toggle to wrap the command pbrun calls in 'shell -c' or not - default: false - type: bool - ini: - - section: pbrun_become_plugin - key: wrap_execution - vars: - - name: ansible_pbrun_wrap_execution - env: - - name: ANSIBLE_PBRUN_WRAP_EXECUTION -''' +DOCUMENTATION = r""" +name: pbrun +short_description: PowerBroker run +description: + - This become plugin allows your remote/login user to execute commands as another user using the C(pbrun) utility. +author: Ansible Core Team +options: + become_user: + description: User you 'become' to execute the task. + type: string + default: '' + ini: + - section: privilege_escalation + key: become_user + - section: pbrun_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_pbrun_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_PBRUN_USER + become_exe: + description: C(pbrun) executable. + type: string + default: pbrun + ini: + - section: privilege_escalation + key: become_exe + - section: pbrun_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_pbrun_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_PBRUN_EXE + become_flags: + description: Options to pass to C(pbrun). + type: string + default: '' + ini: + - section: privilege_escalation + key: become_flags + - section: pbrun_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_pbrun_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_PBRUN_FLAGS + become_pass: + description: Password for C(pbrun). 
+ type: string + required: false + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_pbrun_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_PBRUN_PASS + ini: + - section: pbrun_become_plugin + key: password + wrap_exe: + description: Toggle to wrap the command C(pbrun) calls in C(shell -c) or not. + default: false + type: bool + ini: + - section: pbrun_become_plugin + key: wrap_execution + vars: + - name: ansible_pbrun_wrap_execution + env: + - name: ANSIBLE_PBRUN_WRAP_EXECUTION +""" from ansible.plugins.become import BecomeBase @@ -99,7 +101,7 @@ class BecomeModule(BecomeBase): flags = self.get_option('become_flags') become_user = self.get_option('become_user') - user = '-u %s' % (become_user) if become_user else '' + user = f'-u {become_user}' if become_user else '' noexe = not self.get_option('wrap_exe') - return ' '.join([become_exe, flags, user, self._build_success_command(cmd, shell, noexe=noexe)]) + return f"{become_exe} {flags} {user} {self._build_success_command(cmd, shell, noexe=noexe)}" diff --git a/plugins/become/pfexec.py b/plugins/become/pfexec.py index 392ee961f5..2e7df0f6c0 100644 --- a/plugins/become/pfexec.py +++ b/plugins/become/pfexec.py @@ -1,89 +1,91 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: pfexec - short_description: profile based execution +DOCUMENTATION = r""" +name: pfexec +short_description: Profile based execution +description: + - This become plugin allows your remote/login user to execute commands as another user using the C(pfexec) utility. 
+author: Ansible Core Team +options: + become_user: description: - - This become plugins allows your remote/login user to execute commands as another user via the pfexec utility. - author: Ansible Core Team - options: - become_user: - description: - - User you 'become' to execute the task - - This plugin ignores this setting as pfexec uses it's own C(exec_attr) to figure this out, - but it is supplied here for Ansible to make decisions needed for the task execution, like file permissions. - default: root - ini: - - section: privilege_escalation - key: become_user - - section: pfexec_become_plugin - key: user - vars: - - name: ansible_become_user - - name: ansible_pfexec_user - env: - - name: ANSIBLE_BECOME_USER - - name: ANSIBLE_PFEXEC_USER - become_exe: - description: Sudo executable - default: pfexec - ini: - - section: privilege_escalation - key: become_exe - - section: pfexec_become_plugin - key: executable - vars: - - name: ansible_become_exe - - name: ansible_pfexec_exe - env: - - name: ANSIBLE_BECOME_EXE - - name: ANSIBLE_PFEXEC_EXE - become_flags: - description: Options to pass to pfexec - default: -H -S -n - ini: - - section: privilege_escalation - key: become_flags - - section: pfexec_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_pfexec_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_PFEXEC_FLAGS - become_pass: - description: pfexec password - required: false - vars: - - name: ansible_become_password - - name: ansible_become_pass - - name: ansible_pfexec_pass - env: - - name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_PFEXEC_PASS - ini: - - section: pfexec_become_plugin - key: password - wrap_exe: - description: Toggle to wrap the command pfexec calls in 'shell -c' or not - default: false - type: bool - ini: - - section: pfexec_become_plugin - key: wrap_execution - vars: - - name: ansible_pfexec_wrap_execution - env: - - name: ANSIBLE_PFEXEC_WRAP_EXECUTION - notes: - - This plugin ignores I(become_user) as 
pfexec uses it's own C(exec_attr) to figure this out. -''' + - User you 'become' to execute the task. + - This plugin ignores this setting as pfexec uses its own C(exec_attr) to figure this out, but it is supplied here for + Ansible to make decisions needed for the task execution, like file permissions. + type: string + default: root + ini: + - section: privilege_escalation + key: become_user + - section: pfexec_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_pfexec_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_PFEXEC_USER + become_exe: + description: C(pfexec) executable. + type: string + default: pfexec + ini: + - section: privilege_escalation + key: become_exe + - section: pfexec_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_pfexec_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_PFEXEC_EXE + become_flags: + description: Options to pass to C(pfexec). + type: string + default: -H -S -n + ini: + - section: privilege_escalation + key: become_flags + - section: pfexec_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_pfexec_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_PFEXEC_FLAGS + become_pass: + description: C(pfexec) password. + type: string + required: false + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_pfexec_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_PFEXEC_PASS + ini: + - section: pfexec_become_plugin + key: password + wrap_exe: + description: Toggle to wrap the command C(pfexec) calls in C(shell -c) or not. + default: false + type: bool + ini: + - section: pfexec_become_plugin + key: wrap_execution + vars: + - name: ansible_pfexec_wrap_execution + env: + - name: ANSIBLE_PFEXEC_WRAP_EXECUTION +notes: + - This plugin ignores O(become_user) as pfexec uses its own C(exec_attr) to figure this out. 
+""" from ansible.plugins.become import BecomeBase @@ -102,4 +104,4 @@ class BecomeModule(BecomeBase): flags = self.get_option('become_flags') noexe = not self.get_option('wrap_exe') - return '%s %s %s' % (exe, flags, self._build_success_command(cmd, shell, noexe=noexe)) + return f'{exe} {flags} {self._build_success_command(cmd, shell, noexe=noexe)}' diff --git a/plugins/become/pmrun.py b/plugins/become/pmrun.py index 74b633f09a..413600cdbf 100644 --- a/plugins/become/pmrun.py +++ b/plugins/become/pmrun.py @@ -1,64 +1,65 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: pmrun - short_description: Privilege Manager run - description: - - This become plugins allows your remote/login user to execute commands as another user via the pmrun utility. 
- author: Ansible Core Team - options: - become_exe: - description: Sudo executable - default: pmrun - ini: - - section: privilege_escalation - key: become_exe - - section: pmrun_become_plugin - key: executable - vars: - - name: ansible_become_exe - - name: ansible_pmrun_exe - env: - - name: ANSIBLE_BECOME_EXE - - name: ANSIBLE_PMRUN_EXE - become_flags: - description: Options to pass to pmrun - default: '' - ini: - - section: privilege_escalation - key: become_flags - - section: pmrun_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_pmrun_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_PMRUN_FLAGS - become_pass: - description: pmrun password - required: false - vars: - - name: ansible_become_password - - name: ansible_become_pass - - name: ansible_pmrun_pass - env: - - name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_PMRUN_PASS - ini: - - section: pmrun_become_plugin - key: password - notes: - - This plugin ignores the become_user supplied and uses pmrun's own configuration to select the user. -''' +DOCUMENTATION = r""" +name: pmrun +short_description: Privilege Manager run +description: + - This become plugin allows your remote/login user to execute commands as another user using the C(pmrun) utility. +author: Ansible Core Team +options: + become_exe: + description: C(pmrun) executable. 
+ type: string + default: '' + ini: + - section: privilege_escalation + key: become_flags + - section: pmrun_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_pmrun_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_PMRUN_FLAGS + become_pass: + description: C(pmrun) password. + type: string + required: false + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_pmrun_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_PMRUN_PASS + ini: + - section: pmrun_become_plugin + key: password +notes: + - This plugin ignores the C(become_user) supplied and uses C(pmrun)'s own configuration to select the user. +""" +from shlex import quote as shlex_quote from ansible.plugins.become import BecomeBase -from ansible.module_utils.six.moves import shlex_quote class BecomeModule(BecomeBase): @@ -75,4 +76,4 @@ become = self.get_option('become_exe') flags = self.get_option('become_flags') - return '%s %s %s' % (become, flags, shlex_quote(self._build_success_command(cmd, shell))) + return f'{become} {flags} {shlex_quote(self._build_success_command(cmd, shell))}' diff --git a/plugins/become/run0.py b/plugins/become/run0.py new file mode 100644 index 0000000000..4362d53ebf --- /dev/null +++ b/plugins/become/run0.py @@ -0,0 +1,126 @@ +# Copyright (c) 2024, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +name: run0 +short_description: Systemd's run0 +description: + - This become plugin allows your remote/login user to execute commands as another user using the C(run0) utility. +author: + - Thomas Sjögren (@konstruktoid) +version_added: '9.0.0' +options: + become_user: + description: User you 'become' to execute the task. 
+ default: root + ini: + - section: privilege_escalation + key: become_user + - section: run0_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_run0_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_RUN0_USER + type: string + become_exe: + description: C(run0) executable. + default: run0 + ini: + - section: privilege_escalation + key: become_exe + - section: run0_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_run0_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_RUN0_EXE + type: string + become_flags: + description: Options to pass to C(run0). + default: '' + ini: + - section: privilege_escalation + key: become_flags + - section: run0_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_run0_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_RUN0_FLAGS + type: string +notes: + - This plugin only works when a C(polkit) rule is in place. +""" + +EXAMPLES = r""" +# An example polkit rule that allows the user 'ansible' in the 'wheel' group +# to execute commands using run0 without authentication. 
+/etc/polkit-1/rules.d/60-run0-fast-user-auth.rules: |- + polkit.addRule(function(action, subject) { + if(action.id == "org.freedesktop.systemd1.manage-units" && + subject.isInGroup("wheel") && + subject.user == "ansible") { + return polkit.Result.YES; + } + }); +""" + +from re import compile as re_compile + +from ansible.plugins.become import BecomeBase +from ansible.module_utils.common.text.converters import to_bytes + +ansi_color_codes = re_compile(to_bytes(r"\x1B\[[0-9;]+m")) + + +class BecomeModule(BecomeBase): + + name = "community.general.run0" + + prompt = "Password: " + fail = ("==== AUTHENTICATION FAILED ====",) + success = ("==== AUTHENTICATION COMPLETE ====",) + require_tty = ( + True # see https://github.com/ansible-collections/community.general/issues/6932 + ) + + @staticmethod + def remove_ansi_codes(line): + return ansi_color_codes.sub(b"", line) + + def build_become_command(self, cmd, shell): + super().build_become_command(cmd, shell) + + if not cmd: + return cmd + + become = self.get_option("become_exe") + flags = self.get_option("become_flags") + user = self.get_option("become_user") + + return ( + f"{become} --user={user} {flags} {self._build_success_command(cmd, shell)}" + ) + + def check_success(self, b_output): + b_output = self.remove_ansi_codes(b_output) + return super().check_success(b_output) + + def check_incorrect_password(self, b_output): + b_output = self.remove_ansi_codes(b_output) + return super().check_incorrect_password(b_output) + + def check_missing_password(self, b_output): + b_output = self.remove_ansi_codes(b_output) + return super().check_missing_password(b_output) diff --git a/plugins/become/sesu.py b/plugins/become/sesu.py index 5958c1bfca..ecd29c83c5 100644 --- a/plugins/become/sesu.py +++ b/plugins/become/sesu.py @@ -1,73 +1,75 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # 
SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: sesu - short_description: CA Privileged Access Manager - description: - - This become plugins allows your remote/login user to execute commands as another user via the sesu utility. - author: ansible (@nekonyuu) - options: - become_user: - description: User you 'become' to execute the task - default: '' - ini: - - section: privilege_escalation - key: become_user - - section: sesu_become_plugin - key: user - vars: - - name: ansible_become_user - - name: ansible_sesu_user - env: - - name: ANSIBLE_BECOME_USER - - name: ANSIBLE_SESU_USER - become_exe: - description: sesu executable - default: sesu - ini: - - section: privilege_escalation - key: become_exe - - section: sesu_become_plugin - key: executable - vars: - - name: ansible_become_exe - - name: ansible_sesu_exe - env: - - name: ANSIBLE_BECOME_EXE - - name: ANSIBLE_SESU_EXE - become_flags: - description: Options to pass to sesu - default: -H -S -n - ini: - - section: privilege_escalation - key: become_flags - - section: sesu_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_sesu_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_SESU_FLAGS - become_pass: - description: Password to pass to sesu - required: false - vars: - - name: ansible_become_password - - name: ansible_become_pass - - name: ansible_sesu_pass - env: - - name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_SESU_PASS - ini: - - section: sesu_become_plugin - key: password -''' +DOCUMENTATION = r""" +name: sesu +short_description: CA Privileged Access Manager +description: + - This become plugin allows your remote/login user to execute commands as another user using the C(sesu) utility. +author: ansible (@nekonyuu) +options: + become_user: + description: User you 'become' to execute the task. 
+ type: string + default: '' + ini: + - section: privilege_escalation + key: become_user + - section: sesu_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_sesu_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_SESU_USER + become_exe: + description: C(sesu) executable. + type: string + default: sesu + ini: + - section: privilege_escalation + key: become_exe + - section: sesu_become_plugin + key: executable + vars: + - name: ansible_become_exe + - name: ansible_sesu_exe + env: + - name: ANSIBLE_BECOME_EXE + - name: ANSIBLE_SESU_EXE + become_flags: + description: Options to pass to C(sesu). + type: string + default: -H -S -n + ini: + - section: privilege_escalation + key: become_flags + - section: sesu_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_sesu_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_SESU_FLAGS + become_pass: + description: Password to pass to C(sesu). + type: string + required: false + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_sesu_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_SESU_PASS + ini: + - section: sesu_become_plugin + key: password +""" from ansible.plugins.become import BecomeBase @@ -89,4 +91,4 @@ class BecomeModule(BecomeBase): flags = self.get_option('become_flags') user = self.get_option('become_user') - return '%s %s %s -c %s' % (become, flags, user, self._build_success_command(cmd, shell)) + return f'{become} {flags} {user} -c {self._build_success_command(cmd, shell)}' diff --git a/plugins/become/sudosu.py b/plugins/become/sudosu.py index 60bb2aa517..3b5d4d8b7f 100644 --- a/plugins/become/sudosu.py +++ b/plugins/become/sudosu.py @@ -1,60 +1,77 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2021, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ 
import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = """ - name: sudosu - short_description: Run tasks using sudo su - +DOCUMENTATION = r""" +name: sudosu +short_description: Run tasks using sudo su - +description: + - This become plugin allows your remote/login user to execute commands as another user using the C(sudo) and C(su) utilities + combined. +author: + - Dag Wieers (@dagwieers) +version_added: 2.4.0 +options: + become_user: + description: User you 'become' to execute the task. + type: string + default: root + ini: + - section: privilege_escalation + key: become_user + - section: sudo_become_plugin + key: user + vars: + - name: ansible_become_user + - name: ansible_sudo_user + env: + - name: ANSIBLE_BECOME_USER + - name: ANSIBLE_SUDO_USER + become_flags: + description: Options to pass to C(sudo). + type: string + default: -H -S -n + ini: + - section: privilege_escalation + key: become_flags + - section: sudo_become_plugin + key: flags + vars: + - name: ansible_become_flags + - name: ansible_sudo_flags + env: + - name: ANSIBLE_BECOME_FLAGS + - name: ANSIBLE_SUDO_FLAGS + become_pass: + description: Password to pass to C(sudo). + type: string + required: false + vars: + - name: ansible_become_password + - name: ansible_become_pass + - name: ansible_sudo_pass + env: + - name: ANSIBLE_BECOME_PASS + - name: ANSIBLE_SUDO_PASS + ini: + - section: sudo_become_plugin + key: password + alt_method: description: - - This become plugin allows your remote/login user to execute commands as another user via the C(sudo) and C(su) utilities combined. - author: - - Dag Wieers (@dagwieers) - version_added: 2.4.0 - options: - become_user: - description: User you 'become' to execute the task. 
- default: root - ini: - - section: privilege_escalation - key: become_user - - section: sudo_become_plugin - key: user - vars: - - name: ansible_become_user - - name: ansible_sudo_user - env: - - name: ANSIBLE_BECOME_USER - - name: ANSIBLE_SUDO_USER - become_flags: - description: Options to pass to C(sudo). - default: -H -S -n - ini: - - section: privilege_escalation - key: become_flags - - section: sudo_become_plugin - key: flags - vars: - - name: ansible_become_flags - - name: ansible_sudo_flags - env: - - name: ANSIBLE_BECOME_FLAGS - - name: ANSIBLE_SUDO_FLAGS - become_pass: - description: Password to pass to C(sudo). - required: false - vars: - - name: ansible_become_password - - name: ansible_become_pass - - name: ansible_sudo_pass - env: - - name: ANSIBLE_BECOME_PASS - - name: ANSIBLE_SUDO_PASS - ini: - - section: sudo_become_plugin - key: password + - Whether to use an alternative method to call C(su). Instead of running C(su -l user /path/to/shell -c command), it + runs C(su -l user -c command). + - Use this when the default one is not working on your system. 
+ required: false + type: boolean + ini: + - section: community.general.sudosu + key: alternative_method + vars: + - name: ansible_sudosu_alt_method + env: + - name: ANSIBLE_SUDOSU_ALT_METHOD + version_added: 9.2.0 """ @@ -80,13 +97,16 @@ class BecomeModule(BecomeBase): flags = self.get_option('become_flags') or '' prompt = '' if self.get_option('become_pass'): - self.prompt = '[sudo via ansible, key=%s] password:' % self._id + self.prompt = f'[sudo via ansible, key={self._id}] password:' if flags: # this could be simplified, but kept as is for now for backwards string matching flags = flags.replace('-n', '') - prompt = '-p "%s"' % (self.prompt) + prompt = f'-p "{self.prompt}"' user = self.get_option('become_user') or '' if user: - user = '%s' % (user) + user = f'{user}' - return ' '.join([becomecmd, flags, prompt, 'su -l', user, self._build_success_command(cmd, shell)]) + if self.get_option('alt_method'): + return f"{becomecmd} {flags} {prompt} su -l {user} -c {self._build_success_command(cmd, shell, True)}" + else: + return f"{becomecmd} {flags} {prompt} su -l {user} {self._build_success_command(cmd, shell)}" diff --git a/plugins/cache/memcached.py b/plugins/cache/memcached.py index 77f1717e45..28011e8cab 100644 --- a/plugins/cache/memcached.py +++ b/plugins/cache/memcached.py @@ -1,50 +1,50 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2014, Brian Coca, Josh Drake, et al # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: memcached - short_description: Use memcached DB for cache +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: memcached +short_description: Use memcached DB for cache +description: + - This cache uses JSON formatted, 
per host records saved in memcached. +requirements: + - memcache (python lib) +options: + _uri: description: - - This cache uses JSON formatted, per host records saved in memcached. - requirements: - - memcache (python lib) - options: - _uri: - description: - - List of connection information for the memcached DBs - default: ['127.0.0.1:11211'] - type: list - elements: string - env: - - name: ANSIBLE_CACHE_PLUGIN_CONNECTION - ini: - - key: fact_caching_connection - section: defaults - _prefix: - description: User defined prefix to use when creating the DB entries - default: ansible_facts - env: - - name: ANSIBLE_CACHE_PLUGIN_PREFIX - ini: - - key: fact_caching_prefix - section: defaults - _timeout: - default: 86400 - description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire - env: - - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT - ini: - - key: fact_caching_timeout - section: defaults - type: integer -''' + - List of connection information for the memcached DBs. + default: ['127.0.0.1:11211'] + type: list + elements: string + env: + - name: ANSIBLE_CACHE_PLUGIN_CONNECTION + ini: + - key: fact_caching_connection + section: defaults + _prefix: + description: User defined prefix to use when creating the DB entries. + type: string + default: ansible_facts + env: + - name: ANSIBLE_CACHE_PLUGIN_PREFIX + ini: + - key: fact_caching_prefix + section: defaults + _timeout: + default: 86400 + type: integer + # TODO: determine whether it is OK to change to: type: float + description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire. 
+ env: + - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT + ini: + - key: fact_caching_timeout + section: defaults +""" import collections import os @@ -52,11 +52,9 @@ import time from multiprocessing import Lock from itertools import chain -from ansible import constants as C from ansible.errors import AnsibleError -from ansible.module_utils.common._collections_compat import MutableSet +from collections.abc import MutableSet from ansible.plugins.cache import BaseCacheModule -from ansible.release import __version__ as ansible_base_version from ansible.utils.display import Display try: @@ -191,7 +189,7 @@ class CacheModule(BaseCacheModule): self._keys = CacheModuleKeys(self._db, self._db.get(CacheModuleKeys.PREFIX) or []) def _make_key(self, key): - return "{0}{1}".format(self._prefix, key) + return f"{self._prefix}{key}" def _expire_keys(self): if self._timeout > 0: diff --git a/plugins/cache/pickle.py b/plugins/cache/pickle.py index 06b673921e..6c053138c8 100644 --- a/plugins/cache/pickle.py +++ b/plugins/cache/pickle.py @@ -1,52 +1,49 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017, Brian Coca # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: pickle - short_description: Pickle formatted files. +DOCUMENTATION = r""" +name: pickle +short_description: Pickle formatted files +description: + - This cache uses Python's pickle serialization format, in per host files, saved to the filesystem. +author: Brian Coca (@bcoca) +options: + _uri: + required: true description: - - This cache uses Python's pickle serialization format, in per host files, saved to the filesystem. 
- author: Brian Coca (@bcoca) - options: - _uri: - required: true - description: - - Path in which the cache plugin will save the files - env: - - name: ANSIBLE_CACHE_PLUGIN_CONNECTION - ini: - - key: fact_caching_connection - section: defaults - _prefix: - description: User defined prefix to use when creating the files - env: - - name: ANSIBLE_CACHE_PLUGIN_PREFIX - ini: - - key: fact_caching_prefix - section: defaults - _timeout: - default: 86400 - description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire - env: - - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT - ini: - - key: fact_caching_timeout - section: defaults -''' + - Path in which the cache plugin saves the files. + env: + - name: ANSIBLE_CACHE_PLUGIN_CONNECTION + ini: + - key: fact_caching_connection + section: defaults + type: path + _prefix: + description: User defined prefix to use when creating the files. + env: + - name: ANSIBLE_CACHE_PLUGIN_PREFIX + ini: + - key: fact_caching_prefix + section: defaults + type: string + _timeout: + default: 86400 + description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire. + env: + - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT + ini: + - key: fact_caching_timeout + section: defaults + type: float +""" -try: - import cPickle as pickle -except ImportError: - import pickle +import pickle -from ansible.module_utils.six import PY3 from ansible.plugins.cache import BaseFileCacheModule @@ -54,14 +51,12 @@ class CacheModule(BaseFileCacheModule): """ A caching module backed by pickle files. 
""" + _persistent = False # prevent unnecessary JSON serialization and key munging def _load(self, filepath): # Pickle is a binary format with open(filepath, 'rb') as f: - if PY3: - return pickle.load(f, encoding='bytes') - else: - return pickle.load(f) + return pickle.load(f, encoding='bytes') def _dump(self, value, filepath): with open(filepath, 'wb') as f: diff --git a/plugins/cache/redis.py b/plugins/cache/redis.py index 81e960cf18..d7b596bb32 100644 --- a/plugins/cache/redis.py +++ b/plugins/cache/redis.py @@ -1,78 +1,78 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2014, Brian Coca, Josh Drake, et al # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: redis - short_description: Use Redis DB for cache +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: redis +short_description: Use Redis DB for cache +description: + - This cache uses JSON formatted, per host records saved in Redis. +requirements: + - redis>=2.4.5 (python lib) +options: + _uri: description: - - This cache uses JSON formatted, per host records saved in Redis. - requirements: - - redis>=2.4.5 (python lib) - options: - _uri: - description: - - A colon separated string of connection information for Redis. - - The format is C(host:port:db:password), for example C(localhost:6379:0:changeme). - - To use encryption in transit, prefix the connection with C(tls://), as in C(tls://localhost:6379:0:changeme). - - To use redis sentinel, use separator C(;), for example C(localhost:26379;localhost:26379;0:changeme). Requires redis>=2.9.0. 
- required: true - env: - - name: ANSIBLE_CACHE_PLUGIN_CONNECTION - ini: - - key: fact_caching_connection - section: defaults - _prefix: - description: User defined prefix to use when creating the DB entries - default: ansible_facts - env: - - name: ANSIBLE_CACHE_PLUGIN_PREFIX - ini: - - key: fact_caching_prefix - section: defaults - _keyset_name: - description: User defined name for cache keyset name. - default: ansible_cache_keys - env: - - name: ANSIBLE_CACHE_REDIS_KEYSET_NAME - ini: - - key: fact_caching_redis_keyset_name - section: defaults - version_added: 1.3.0 - _sentinel_service_name: - description: The redis sentinel service name (or referenced as cluster name). - env: - - name: ANSIBLE_CACHE_REDIS_SENTINEL - ini: - - key: fact_caching_redis_sentinel - section: defaults - version_added: 1.3.0 - _timeout: - default: 86400 - description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire - env: - - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT - ini: - - key: fact_caching_timeout - section: defaults - type: integer -''' + - A colon separated string of connection information for Redis. + - The format is V(host:port:db:password), for example V(localhost:6379:0:changeme). + - To use encryption in transit, prefix the connection with V(tls://), as in V(tls://localhost:6379:0:changeme). + - To use redis sentinel, use separator V(;), for example V(localhost:26379;localhost:26379;0:changeme). Requires redis>=2.9.0. + type: string + required: true + env: + - name: ANSIBLE_CACHE_PLUGIN_CONNECTION + ini: + - key: fact_caching_connection + section: defaults + _prefix: + description: User defined prefix to use when creating the DB entries. + type: string + default: ansible_facts + env: + - name: ANSIBLE_CACHE_PLUGIN_PREFIX + ini: + - key: fact_caching_prefix + section: defaults + _keyset_name: + description: User defined name for cache keyset name. 
+ type: string + default: ansible_cache_keys + env: + - name: ANSIBLE_CACHE_REDIS_KEYSET_NAME + ini: + - key: fact_caching_redis_keyset_name + section: defaults + version_added: 1.3.0 + _sentinel_service_name: + description: The redis sentinel service name (or referenced as cluster name). + type: string + env: + - name: ANSIBLE_CACHE_REDIS_SENTINEL + ini: + - key: fact_caching_redis_sentinel + section: defaults + version_added: 1.3.0 + _timeout: + default: 86400 + type: integer + # TODO: determine whether it is OK to change to: type: float + description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire. + env: + - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT + ini: + - key: fact_caching_timeout + section: defaults +""" import re import time import json -from ansible import constants as C from ansible.errors import AnsibleError -from ansible.module_utils.common.text.converters import to_native from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder from ansible.plugins.cache import BaseCacheModule -from ansible.release import __version__ as ansible_base_version from ansible.utils.display import Display try: @@ -128,7 +128,7 @@ class CacheModule(BaseCacheModule): connection = self._parse_connection(self.re_url_conn, uri) self._db = StrictRedis(*connection, **kw) - display.vv('Redis connection: %s' % self._db) + display.vv(f'Redis connection: {self._db}') @staticmethod def _parse_connection(re_patt, uri): @@ -152,7 +152,7 @@ class CacheModule(BaseCacheModule): # format: "localhost:26379;localhost2:26379;0:changeme" connections = uri.split(';') connection_args = connections.pop(-1) - if len(connection_args) > 0: # hanle if no db nr is given + if len(connection_args) > 0: # handle if no db nr is given connection_args = connection_args.split(':') kw['db'] = connection_args.pop(0) try: @@ -161,12 +161,12 @@ class CacheModule(BaseCacheModule): pass # password is optional sentinels = [self._parse_connection(self.re_sent_conn, shost) 
for shost in connections] - display.vv('\nUsing redis sentinels: %s' % sentinels) + display.vv(f'\nUsing redis sentinels: {sentinels}') scon = Sentinel(sentinels, **kw) try: return scon.master_for(self._sentinel_service_name, socket_timeout=0.2) except Exception as exc: - raise AnsibleError('Could not connect to redis sentinel: %s' % to_native(exc)) + raise AnsibleError(f'Could not connect to redis sentinel: {exc}') def _make_key(self, key): return self._prefix + key @@ -224,7 +224,7 @@ class CacheModule(BaseCacheModule): def copy(self): # TODO: there is probably a better way to do this in redis - ret = dict([(k, self.get(k)) for k in self.keys()]) + ret = {k: self.get(k) for k in self.keys()} return ret def __getstate__(self): diff --git a/plugins/cache/yaml.py b/plugins/cache/yaml.py index 3a5ddf3e6f..52cbf887de 100644 --- a/plugins/cache/yaml.py +++ b/plugins/cache/yaml.py @@ -1,49 +1,49 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017, Brian Coca # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: yaml - short_description: YAML formatted files. +DOCUMENTATION = r""" +name: yaml +short_description: YAML formatted files +description: + - This cache uses YAML formatted, per host, files saved to the filesystem. +author: Brian Coca (@bcoca) +options: + _uri: + required: true description: - - This cache uses YAML formatted, per host, files saved to the filesystem. 
- author: Brian Coca (@bcoca) - options: - _uri: - required: true - description: - - Path in which the cache plugin will save the files - env: - - name: ANSIBLE_CACHE_PLUGIN_CONNECTION - ini: - - key: fact_caching_connection - section: defaults - _prefix: - description: User defined prefix to use when creating the files - env: - - name: ANSIBLE_CACHE_PLUGIN_PREFIX - ini: - - key: fact_caching_prefix - section: defaults - _timeout: - default: 86400 - description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire - env: - - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT - ini: - - key: fact_caching_timeout - section: defaults - type: integer -''' + - Path in which the cache plugin saves the files. + env: + - name: ANSIBLE_CACHE_PLUGIN_CONNECTION + ini: + - key: fact_caching_connection + section: defaults + type: string + _prefix: + description: User defined prefix to use when creating the files. + env: + - name: ANSIBLE_CACHE_PLUGIN_PREFIX + ini: + - key: fact_caching_prefix + section: defaults + type: string + _timeout: + default: 86400 + description: Expiration timeout in seconds for the cache plugin data. Set to 0 to never expire. 
+ env: + - name: ANSIBLE_CACHE_PLUGIN_TIMEOUT + ini: + - key: fact_caching_timeout + section: defaults + type: integer + # TODO: determine whether it is OK to change to: type: float +""" - -import codecs +import os import yaml @@ -58,9 +58,9 @@ class CacheModule(BaseFileCacheModule): """ def _load(self, filepath): - with codecs.open(filepath, 'r', encoding='utf-8') as f: + with open(os.path.abspath(filepath), 'r', encoding='utf-8') as f: return AnsibleLoader(f).get_single_data() def _dump(self, value, filepath): - with codecs.open(filepath, 'w', encoding='utf-8') as f: + with open(os.path.abspath(filepath), 'w', encoding='utf-8') as f: yaml.dump(value, f, Dumper=AnsibleDumper, default_flow_style=False) diff --git a/plugins/callback/cgroup_memory_recap.py b/plugins/callback/cgroup_memory_recap.py index ccdbcc9cf0..294ee4b378 100644 --- a/plugins/callback/cgroup_memory_recap.py +++ b/plugins/callback/cgroup_memory_recap.py @@ -1,44 +1,45 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018 Matt Martz # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: cgroup_memory_recap - type: aggregate - requirements: - - whitelist in configuration - - cgroups - short_description: Profiles maximum memory usage of tasks and full execution using cgroups - description: - - This is an ansible callback plugin that profiles maximum memory usage of ansible and individual tasks, and displays a recap at the end using cgroups. - notes: - - Requires ansible to be run from within a cgroup, such as with C(cgexec -g memory:ansible_profile ansible-playbook ...). - - This cgroup should only be used by ansible to get accurate results. 
- - To create the cgroup, first use a command such as C(sudo cgcreate -a ec2-user:ec2-user -t ec2-user:ec2-user -g memory:ansible_profile). - options: - max_mem_file: - required: true - description: Path to cgroups C(memory.max_usage_in_bytes) file. Example C(/sys/fs/cgroup/memory/ansible_profile/memory.max_usage_in_bytes). - env: - - name: CGROUP_MAX_MEM_FILE - ini: - - section: callback_cgroupmemrecap - key: max_mem_file - cur_mem_file: - required: true - description: Path to C(memory.usage_in_bytes) file. Example C(/sys/fs/cgroup/memory/ansible_profile/memory.usage_in_bytes). - env: - - name: CGROUP_CUR_MEM_FILE - ini: - - section: callback_cgroupmemrecap - key: cur_mem_file -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: cgroup_memory_recap +type: aggregate +requirements: + - whitelist in configuration + - cgroups +short_description: Profiles maximum memory usage of tasks and full execution using cgroups +description: + - This is an Ansible callback plugin that profiles maximum memory usage of Ansible and individual tasks, and displays a + recap at the end using cgroups. +notes: + - Requires ansible to be run from within a C(cgroup), such as with C(cgexec -g memory:ansible_profile ansible-playbook ...). + - This C(cgroup) should only be used by Ansible to get accurate results. + - To create the C(cgroup), first use a command such as C(sudo cgcreate -a ec2-user:ec2-user -t ec2-user:ec2-user -g memory:ansible_profile). +options: + max_mem_file: + required: true + description: Path to cgroups C(memory.max_usage_in_bytes) file. Example V(/sys/fs/cgroup/memory/ansible_profile/memory.max_usage_in_bytes). + type: str + env: + - name: CGROUP_MAX_MEM_FILE + ini: + - section: callback_cgroupmemrecap + key: max_mem_file + cur_mem_file: + required: true + description: Path to C(memory.usage_in_bytes) file. Example V(/sys/fs/cgroup/memory/ansible_profile/memory.usage_in_bytes). 
+ type: str + env: + - name: CGROUP_CUR_MEM_FILE + ini: + - section: callback_cgroupmemrecap + key: cur_mem_file +""" import time import threading @@ -112,7 +113,7 @@ class CallbackModule(CallbackBase): max_results = int(f.read().strip()) / 1024 / 1024 self._display.banner('CGROUP MEMORY RECAP') - self._display.display('Execution Maximum: %0.2fMB\n\n' % max_results) + self._display.display(f'Execution Maximum: {max_results:0.2f}MB\n\n') for task, memory in self.task_results: - self._display.display('%s (%s): %0.2fMB' % (task.get_name(), task._uuid, memory)) + self._display.display(f'{task.get_name()} ({task._uuid}): {memory:0.2f}MB') diff --git a/plugins/callback/context_demo.py b/plugins/callback/context_demo.py index b9558fc064..f390a947a4 100644 --- a/plugins/callback/context_demo.py +++ b/plugins/callback/context_demo.py @@ -1,23 +1,21 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2012, Michael DeHaan, # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: context_demo - type: aggregate - short_description: demo callback that adds play/task context - description: - - Displays some play and task context along with normal output. - - This is mostly for demo purposes. - requirements: - - whitelist in configuration -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: context_demo +type: aggregate +short_description: Demo callback that adds play/task context +description: + - Displays some play and task context along with normal output. + - This is mostly for demo purposes. 
+requirements: + - whitelist in configuration +""" from ansible.plugins.callback import CallbackBase @@ -38,15 +36,15 @@ class CallbackModule(CallbackBase): self.play = None def v2_on_any(self, *args, **kwargs): - self._display.display("--- play: {0} task: {1} ---".format(getattr(self.play, 'name', None), self.task)) + self._display.display(f"--- play: {getattr(self.play, 'name', None)} task: {self.task} ---") self._display.display(" --- ARGS ") for i, a in enumerate(args): - self._display.display(' %s: %s' % (i, a)) + self._display.display(f' {i}: {a}') self._display.display(" --- KWARGS ") for k in kwargs: - self._display.display(' %s: %s' % (k, kwargs[k])) + self._display.display(f' {k}: {kwargs[k]}') def v2_playbook_on_play_start(self, play): self.play = play diff --git a/plugins/callback/counter_enabled.py b/plugins/callback/counter_enabled.py index 555ebd29a6..d5fe334a49 100644 --- a/plugins/callback/counter_enabled.py +++ b/plugins/callback/counter_enabled.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Ivan Aragones Muniesa # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -6,28 +5,26 @@ Counter enabled Ansible callback plugin (See DOCUMENTATION for more information) ''' -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: counter_enabled - type: stdout - short_description: adds counters to the output items (tasks and hosts/task) - description: - - Use this callback when you need a kind of progress bar on a large environments. - - You will know how many tasks has the playbook to run, and which one is actually running. - - You will know how many hosts may run a task, and which of them is actually running. 
- extends_documentation_fragment: - - default_callback - requirements: - - set as stdout callback in C(ansible.cfg) (C(stdout_callback = counter_enabled)) -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: counter_enabled +type: stdout +short_description: Adds counters to the output items (tasks and hosts/task) +description: + - Use this callback when you need a kind of progress bar on a large environments. + - You can see how many tasks has the playbook to run, and which one is actually running. + - You can see how many hosts may run a task, and which of them is actually running. +extends_documentation_fragment: + - default_callback +requirements: + - set as stdout callback in C(ansible.cfg) (C(stdout_callback = counter_enabled)) +""" from ansible import constants as C from ansible.plugins.callback import CallbackBase from ansible.utils.color import colorize, hostcolor -from ansible.template import Templar from ansible.playbook.task_include import TaskInclude @@ -70,9 +67,9 @@ class CallbackModule(CallbackBase): def v2_playbook_on_play_start(self, play): name = play.get_name().strip() if not name: - msg = u"play" + msg = "play" else: - msg = u"PLAY [%s]" % name + msg = f"PLAY [{name}]" self._play = play @@ -92,25 +89,17 @@ class CallbackModule(CallbackBase): for host in hosts: stat = stats.summarize(host) - self._display.display(u"%s : %s %s %s %s %s %s" % ( - hostcolor(host, stat), - colorize(u'ok', stat['ok'], C.COLOR_OK), - colorize(u'changed', stat['changed'], C.COLOR_CHANGED), - colorize(u'unreachable', stat['unreachable'], C.COLOR_UNREACHABLE), - colorize(u'failed', stat['failures'], C.COLOR_ERROR), - colorize(u'rescued', stat['rescued'], C.COLOR_OK), - colorize(u'ignored', stat['ignored'], C.COLOR_WARN)), + self._display.display( + f"{hostcolor(host, stat)} : {colorize('ok', stat['ok'], C.COLOR_OK)} {colorize('changed', stat['changed'], C.COLOR_CHANGED)} " + f"{colorize('unreachable', stat['unreachable'], C.COLOR_UNREACHABLE)} {colorize('failed', 
stat['failures'], C.COLOR_ERROR)} " + f"{colorize('rescued', stat['rescued'], C.COLOR_OK)} {colorize('ignored', stat['ignored'], C.COLOR_WARN)}", screen_only=True ) - self._display.display(u"%s : %s %s %s %s %s %s" % ( - hostcolor(host, stat, False), - colorize(u'ok', stat['ok'], None), - colorize(u'changed', stat['changed'], None), - colorize(u'unreachable', stat['unreachable'], None), - colorize(u'failed', stat['failures'], None), - colorize(u'rescued', stat['rescued'], None), - colorize(u'ignored', stat['ignored'], None)), + self._display.display( + f"{hostcolor(host, stat, False)} : {colorize('ok', stat['ok'], None)} {colorize('changed', stat['changed'], None)} " + f"{colorize('unreachable', stat['unreachable'], None)} {colorize('failed', stat['failures'], None)} " + f"{colorize('rescued', stat['rescued'], None)} {colorize('ignored', stat['ignored'], None)}", log_only=True ) @@ -125,12 +114,14 @@ class CallbackModule(CallbackBase): for k in sorted(stats.custom.keys()): if k == '_run': continue - self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', ''))) + _custom_stats = self._dump_results(stats.custom[k], indent=1).replace('\n', '') + self._display.display(f'\t{k}: {_custom_stats}') # print per run custom stats if '_run' in stats.custom: self._display.display("", screen_only=True) - self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', '')) + _custom_stats_run = self._dump_results(stats.custom['_run'], indent=1).replace('\n', '') + self._display.display(f'\tRUN: {_custom_stats_run}') self._display.display("", screen_only=True) def v2_playbook_on_task_start(self, task, is_conditional): @@ -144,13 +135,13 @@ class CallbackModule(CallbackBase): # that they can secure this if they feel that their stdout is insecure # (shoulder surfing, logging stdout straight to a file, etc). 
        if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
-            args = ', '.join(('%s=%s' % a for a in task.args.items()))
-            args = ' %s' % args
-            self._display.banner("TASK %d/%d [%s%s]" % (self._task_counter, self._task_total, task.get_name().strip(), args))
+            args = ', '.join((f'{k}={v}' for k, v in task.args.items()))
+            args = f' {args}'
+            self._display.banner(f"TASK {self._task_counter}/{self._task_total} [{task.get_name().strip()}{args}]")
         if self._display.verbosity >= 2:
             path = task.get_path()
             if path:
-                self._display.display("task path: %s" % path, color=C.COLOR_DEBUG)
+                self._display.display(f"task path: {path}", color=C.COLOR_DEBUG)
 
         self._host_counter = self._previous_batch_total
         self._task_counter += 1
@@ -167,15 +158,15 @@ class CallbackModule(CallbackBase):
             return
         elif result._result.get('changed', False):
             if delegated_vars:
-                msg = "changed: %d/%d [%s -> %s]" % (self._host_counter, self._host_total, result._host.get_name(), delegated_vars['ansible_host'])
+                msg = f"changed: {self._host_counter}/{self._host_total} [{result._host.get_name()} -> {delegated_vars['ansible_host']}]"
             else:
-                msg = "changed: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name())
+                msg = f"changed: {self._host_counter}/{self._host_total} [{result._host.get_name()}]"
             color = C.COLOR_CHANGED
         else:
             if delegated_vars:
-                msg = "ok: %d/%d [%s -> %s]" % (self._host_counter, self._host_total, result._host.get_name(), delegated_vars['ansible_host'])
+                msg = f"ok: {self._host_counter}/{self._host_total} [{result._host.get_name()} -> {delegated_vars['ansible_host']}]"
             else:
-                msg = "ok: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name())
+                msg = f"ok: {self._host_counter}/{self._host_total} [{result._host.get_name()}]"
             color = C.COLOR_OK
 
         self._handle_warnings(result._result)
@@ -186,7 +177,7 @@ class CallbackModule(CallbackBase):
             self._clean_results(result._result, result._task.action)
 
             if self._run_is_verbose(result):
-                msg += " => %s" % 
(self._dump_results(result._result),) + msg += f" => {self._dump_results(result._result)}" self._display.display(msg, color=color) def v2_runner_on_failed(self, result, ignore_errors=False): @@ -207,14 +198,16 @@ class CallbackModule(CallbackBase): else: if delegated_vars: - self._display.display("fatal: %d/%d [%s -> %s]: FAILED! => %s" % (self._host_counter, self._host_total, - result._host.get_name(), delegated_vars['ansible_host'], - self._dump_results(result._result)), - color=C.COLOR_ERROR) + self._display.display( + f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()} -> " + f"{delegated_vars['ansible_host']}]: FAILED! => {self._dump_results(result._result)}", + color=C.COLOR_ERROR + ) else: - self._display.display("fatal: %d/%d [%s]: FAILED! => %s" % (self._host_counter, self._host_total, - result._host.get_name(), self._dump_results(result._result)), - color=C.COLOR_ERROR) + self._display.display( + f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()}]: FAILED! => {self._dump_results(result._result)}", + color=C.COLOR_ERROR + ) if ignore_errors: self._display.display("...ignoring", color=C.COLOR_SKIP) @@ -232,9 +225,9 @@ class CallbackModule(CallbackBase): if result._task.loop and 'results' in result._result: self._process_items(result) else: - msg = "skipping: %d/%d [%s]" % (self._host_counter, self._host_total, result._host.get_name()) + msg = f"skipping: {self._host_counter}/{self._host_total} [{result._host.get_name()}]" if self._run_is_verbose(result): - msg += " => %s" % self._dump_results(result._result) + msg += f" => {self._dump_results(result._result)}" self._display.display(msg, color=C.COLOR_SKIP) def v2_runner_on_unreachable(self, result): @@ -245,11 +238,13 @@ class CallbackModule(CallbackBase): delegated_vars = result._result.get('_ansible_delegated_vars', None) if delegated_vars: - self._display.display("fatal: %d/%d [%s -> %s]: UNREACHABLE! 
=> %s" % (self._host_counter, self._host_total, - result._host.get_name(), delegated_vars['ansible_host'], - self._dump_results(result._result)), - color=C.COLOR_UNREACHABLE) + self._display.display( + f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()} -> " + f"{delegated_vars['ansible_host']}]: UNREACHABLE! => {self._dump_results(result._result)}", + color=C.COLOR_UNREACHABLE + ) else: - self._display.display("fatal: %d/%d [%s]: UNREACHABLE! => %s" % (self._host_counter, self._host_total, - result._host.get_name(), self._dump_results(result._result)), - color=C.COLOR_UNREACHABLE) + self._display.display( + f"fatal: {self._host_counter}/{self._host_total} [{result._host.get_name()}]: UNREACHABLE! => {self._dump_results(result._result)}", + color=C.COLOR_UNREACHABLE + ) diff --git a/plugins/callback/default_without_diff.py b/plugins/callback/default_without_diff.py new file mode 100644 index 0000000000..b0315829b5 --- /dev/null +++ b/plugins/callback/default_without_diff.py @@ -0,0 +1,43 @@ + +# Copyright (c) 2024, Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: default_without_diff +type: stdout +short_description: The default ansible callback without diff output +version_added: 8.4.0 +description: + - This is basically the default ansible callback plugin (P(ansible.builtin.default#callback)) without showing diff output. + This can be useful when using another callback which sends more detailed information to another service, like the L(ARA, + https://ara.recordsansible.org/) callback, and you want diff output sent to that plugin but not shown on the console output. 
+author: Felix Fontein (@felixfontein) +extends_documentation_fragment: + - ansible.builtin.default_callback + - ansible.builtin.result_format_callback +""" + +EXAMPLES = r""" +# Enable callback in ansible.cfg: +ansible_config: | + [defaults] + stdout_callback = community.general.default_without_diff + +# Enable callback with environment variables: +environment_variable: |- + ANSIBLE_STDOUT_CALLBACK=community.general.default_without_diff +""" + +from ansible.plugins.callback.default import CallbackModule as Default + + +class CallbackModule(Default): + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'community.general.default_without_diff' + + def v2_on_file_diff(self, result): + pass diff --git a/plugins/callback/dense.py b/plugins/callback/dense.py index 490705fd27..de50d97ce1 100644 --- a/plugins/callback/dense.py +++ b/plugins/callback/dense.py @@ -1,25 +1,23 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2016, Dag Wieers # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" name: dense type: stdout -short_description: minimal stdout output +short_description: Minimal stdout output extends_documentation_fragment: -- default_callback + - default_callback description: -- When in verbose mode it will act the same as the default callback. + - When in verbose mode it acts the same as the default callback. 
author: -- Dag Wieers (@dagwieers) + - Dag Wieers (@dagwieers) requirements: -- set as stdout in configuration -''' + - set as stdout in configuration +""" HAS_OD = False try: @@ -28,8 +26,7 @@ try: except ImportError: pass -from ansible.module_utils.six import binary_type, text_type -from ansible.module_utils.common._collections_compat import MutableMapping, MutableSequence +from collections.abc import MutableMapping, MutableSequence from ansible.plugins.callback.default import CallbackModule as CallbackModule_default from ansible.utils.color import colorize, hostcolor from ansible.utils.display import Display @@ -195,7 +192,7 @@ class CallbackModule(CallbackModule_default): self.disabled = True def __del__(self): - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") def _add_host(self, result, status): name = result._host.get_name() @@ -237,13 +234,13 @@ class CallbackModule(CallbackModule_default): # Remove empty attributes (list, dict, str) for attr in result.copy(): - if isinstance(result[attr], (MutableSequence, MutableMapping, binary_type, text_type)): + if isinstance(result[attr], (MutableSequence, MutableMapping, bytes, str)): if not result[attr]: del result[attr] def _handle_exceptions(self, result): if 'exception' in result: - # Remove the exception from the result so it's not shown every time + # Remove the exception from the result so it is not shown every time del result['exception'] if self._display.verbosity == 1: @@ -252,7 +249,7 @@ class CallbackModule(CallbackModule_default): def _display_progress(self, result=None): # Always rewrite the complete line sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.nolinewrap + vt100.underline) - sys.stdout.write('%s %d:' % (self.type, self.count[self.type])) + sys.stdout.write(f'{self.type} {self.count[self.type]}:') sys.stdout.write(vt100.reset) sys.stdout.flush() @@ -260,22 
+257,18 @@ class CallbackModule(CallbackModule_default): for name in self.hosts: sys.stdout.write(' ') if self.hosts[name].get('delegate', None): - sys.stdout.write(self.hosts[name]['delegate'] + '>') + sys.stdout.write(f"{self.hosts[name]['delegate']}>") sys.stdout.write(colors[self.hosts[name]['state']] + name + vt100.reset) sys.stdout.flush() -# if result._result.get('diff', False): -# sys.stdout.write('\n' + vt100.linewrap) sys.stdout.write(vt100.linewrap) -# self.keep = True - def _display_task_banner(self): if not self.shown_title: self.shown_title = True sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline) - sys.stdout.write('%s %d: %s' % (self.type, self.count[self.type], self.task.get_name().strip())) - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f'{self.type} {self.count[self.type]}: {self.task.get_name().strip()}') + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") sys.stdout.flush() else: sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline) @@ -284,7 +277,7 @@ class CallbackModule(CallbackModule_default): def _display_results(self, result, status): # Leave the previous task on screen (as it has changes/errors) if self._display.verbosity == 0 and self.keep: - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") else: sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline) self.keep = False @@ -309,16 +302,16 @@ class CallbackModule(CallbackModule_default): if result._task.loop and 'results' in result._result: self._process_items(result) else: - sys.stdout.write(colors[status] + status + ': ') + sys.stdout.write(f"{colors[status] + status}: ") delegated_vars = result._result.get('_ansible_delegated_vars', None) if delegated_vars: - sys.stdout.write(vt100.reset + result._host.get_name() + '>' + 
colors[status] + delegated_vars['ansible_host']) + sys.stdout.write(f"{vt100.reset}{result._host.get_name()}>{colors[status]}{delegated_vars['ansible_host']}") else: sys.stdout.write(result._host.get_name()) - sys.stdout.write(': ' + dump + '\n') - sys.stdout.write(vt100.reset + vt100.save + vt100.clearline) + sys.stdout.write(f": {dump}\n") + sys.stdout.write(f"{vt100.reset}{vt100.save}{vt100.clearline}") sys.stdout.flush() if status == 'changed': @@ -327,7 +320,7 @@ class CallbackModule(CallbackModule_default): def v2_playbook_on_play_start(self, play): # Leave the previous task on screen (as it has changes/errors) if self._display.verbosity == 0 and self.keep: - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.bold) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}{vt100.bold}") else: sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.bold) @@ -341,14 +334,14 @@ class CallbackModule(CallbackModule_default): name = play.get_name().strip() if not name: name = 'unnamed' - sys.stdout.write('PLAY %d: %s' % (self.count['play'], name.upper())) - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f"PLAY {self.count['play']}: {name.upper()}") + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") sys.stdout.flush() def v2_playbook_on_task_start(self, task, is_conditional): # Leave the previous task on screen (as it has changes/errors) if self._display.verbosity == 0 and self.keep: - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}{vt100.underline}") else: # Do not clear line, since we want to retain the previous output sys.stdout.write(vt100.restore + vt100.reset + vt100.underline) @@ -365,14 +358,14 @@ class CallbackModule(CallbackModule_default): self.count['task'] += 
1 # Write the next task on screen (behind the prompt is the previous output) - sys.stdout.write('%s %d.' % (self.type, self.count[self.type])) + sys.stdout.write(f'{self.type} {self.count[self.type]}.') sys.stdout.write(vt100.reset) sys.stdout.flush() def v2_playbook_on_handler_task_start(self, task): # Leave the previous task on screen (as it has changes/errors) if self._display.verbosity == 0 and self.keep: - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline + vt100.underline) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}{vt100.underline}") else: sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline + vt100.underline) @@ -388,7 +381,7 @@ class CallbackModule(CallbackModule_default): self.count[self.type] += 1 # Write the next task on screen (behind the prompt is the previous output) - sys.stdout.write('%s %d.' % (self.type, self.count[self.type])) + sys.stdout.write(f'{self.type} {self.count[self.type]}.') sys.stdout.write(vt100.reset) sys.stdout.flush() @@ -451,13 +444,13 @@ class CallbackModule(CallbackModule_default): def v2_playbook_on_no_hosts_remaining(self): if self._display.verbosity == 0 and self.keep: - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") else: sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline) self.keep = False - sys.stdout.write(vt100.white + vt100.redbg + 'NO MORE HOSTS LEFT') - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f"{vt100.white + vt100.redbg}NO MORE HOSTS LEFT") + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") sys.stdout.flush() def v2_playbook_on_include(self, included_file): @@ -465,7 +458,7 @@ class CallbackModule(CallbackModule_default): def v2_playbook_on_stats(self, stats): if self._display.verbosity == 0 and self.keep: - 
sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") else: sys.stdout.write(vt100.restore + vt100.reset + vt100.clearline) @@ -476,22 +469,16 @@ class CallbackModule(CallbackModule_default): sys.stdout.write(vt100.bold + vt100.underline) sys.stdout.write('SUMMARY') - sys.stdout.write(vt100.restore + vt100.reset + '\n' + vt100.save + vt100.clearline) + sys.stdout.write(f"{vt100.restore}{vt100.reset}\n{vt100.save}{vt100.clearline}") sys.stdout.flush() hosts = sorted(stats.processed.keys()) for h in hosts: t = stats.summarize(h) self._display.display( - u"%s : %s %s %s %s %s %s" % ( - hostcolor(h, t), - colorize(u'ok', t['ok'], C.COLOR_OK), - colorize(u'changed', t['changed'], C.COLOR_CHANGED), - colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE), - colorize(u'failed', t['failures'], C.COLOR_ERROR), - colorize(u'rescued', t['rescued'], C.COLOR_OK), - colorize(u'ignored', t['ignored'], C.COLOR_WARN), - ), + f"{hostcolor(h, t)} : {colorize('ok', t['ok'], C.COLOR_OK)} {colorize('changed', t['changed'], C.COLOR_CHANGED)} " + f"{colorize('unreachable', t['unreachable'], C.COLOR_UNREACHABLE)} {colorize('failed', t['failures'], C.COLOR_ERROR)} " + f"{colorize('rescued', t['rescued'], C.COLOR_OK)} {colorize('ignored', t['ignored'], C.COLOR_WARN)}", screen_only=True ) diff --git a/plugins/callback/diy.py b/plugins/callback/diy.py index 55a07725f2..c94fe25093 100644 --- a/plugins/callback/diy.py +++ b/plugins/callback/diy.py @@ -1,608 +1,601 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2019, Trevor Highfill # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' - name: diy - type: stdout - 
short_description: Customize the output - version_added: 0.2.0 - description: - - Callback plugin that allows you to supply your own custom callback templates to be output. - author: Trevor Highfill (@theque5t) - extends_documentation_fragment: - - default_callback - notes: - - Uses the C(default) callback plugin output when a custom callback message(C(msg)) is not provided. - - Makes the callback event data available via the C(ansible_callback_diy) dictionary, which can be used in the templating context for the options. - The dictionary is only available in the templating context for the options. It is not a variable that is available via the other - various execution contexts, such as playbook, play, task etc. - - Options being set by their respective variable input can only be set using the variable if the variable was set in a context that is available to the - respective callback. - Use the C(ansible_callback_diy) dictionary to see what is available to a callback. Additionally, C(ansible_callback_diy.top_level_var_names) will output - the top level variable names available to the callback. - - Each option value is rendered as a template before being evaluated. This allows for the dynamic usage of an option. For example, - C("{{ 'yellow' if ansible_callback_diy.result.is_changed else 'bright green' }}") - - "**Condition** for all C(msg) options: - if value C(is None or omit), - then the option is not being used. - **Effect**: use of the C(default) callback plugin for output" - - "**Condition** for all C(msg) options: - if value C(is not None and not omit and length is not greater than 0), - then the option is being used without output. - **Effect**: suppress output" - - "**Condition** for all C(msg) options: - if value C(is not None and not omit and length is greater than 0), - then the option is being used with output. 
- **Effect**: render value as template and output" - - "Valid color values: C(black), C(bright gray), C(blue), C(white), C(green), C(bright blue), C(cyan), C(bright green), C(red), C(bright cyan), - C(purple), C(bright red), C(yellow), C(bright purple), C(dark gray), C(bright yellow), C(magenta), C(bright magenta), C(normal)" - seealso: - - name: default – default Ansible screen output - description: The official documentation on the B(default) callback plugin. - link: https://docs.ansible.com/ansible/latest/plugins/callback/default.html - requirements: - - set as stdout_callback in configuration - options: - on_any_msg: - description: Output to be used for callback on_any. - ini: - - section: callback_diy - key: on_any_msg - env: - - name: ANSIBLE_CALLBACK_DIY_ON_ANY_MSG - vars: - - name: ansible_callback_diy_on_any_msg - type: str +DOCUMENTATION = r""" +name: diy +type: stdout +short_description: Customize the output +version_added: 0.2.0 +description: + - Callback plugin that allows you to supply your own custom callback templates to be output. +author: Trevor Highfill (@theque5t) +extends_documentation_fragment: + - default_callback +notes: + - Uses the P(ansible.builtin.default#callback) callback plugin output when a custom callback V(message(msg\)) is not provided. + - Makes the callback event data available using the C(ansible_callback_diy) dictionary, which can be used in the templating + context for the options. The dictionary is only available in the templating context for the options. It is not a variable + that is available using the other various execution contexts, such as playbook, play, task, and so on so forth. + - Options being set by their respective variable input can only be set using the variable if the variable was set in a context + that is available to the respective callback. Use the C(ansible_callback_diy) dictionary to see what is available to a + callback. 
Additionally, C(ansible_callback_diy.top_level_var_names) outputs the top level variable names available + to the callback. + - Each option value is rendered as a template before being evaluated. This allows for the dynamic usage of an option. For + example, V("{{ 'yellow' if ansible_callback_diy.result.is_changed else 'bright green' }}"). + - 'B(Condition) for all C(msg) options: if value V(is None or omit), then the option is not being used. B(Effect): use of + the C(default) callback plugin for output.' + - 'B(Condition) for all C(msg) options: if value V(is not None and not omit and length is not greater than 0), then the + option is being used without output. B(Effect): suppress output.' + - 'B(Condition) for all C(msg) options: if value V(is not None and not omit and length is greater than 0), then the option + is being used with output. B(Effect): render value as template and output.' + - 'Valid color values: V(black), V(bright gray), V(blue), V(white), V(green), V(bright blue), V(cyan), V(bright green), + V(red), V(bright cyan), V(purple), V(bright red), V(yellow), V(bright purple), V(dark gray), V(bright yellow), V(magenta), + V(bright magenta), V(normal).' +seealso: + - name: default – default Ansible screen output + description: The official documentation on the B(default) callback plugin. + link: https://docs.ansible.com/ansible/latest/plugins/callback/default.html +requirements: + - set as stdout_callback in configuration +options: + on_any_msg: + description: Output to be used for callback on_any. + ini: + - section: callback_diy + key: on_any_msg + env: + - name: ANSIBLE_CALLBACK_DIY_ON_ANY_MSG + vars: + - name: ansible_callback_diy_on_any_msg + type: str - on_any_msg_color: - description: - - Output color to be used for I(on_any_msg). - - Template should render a L(valid color value,#notes). 
- ini: - - section: callback_diy - key: on_any_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_ON_ANY_MSG_COLOR - vars: - - name: ansible_callback_diy_on_any_msg_color - type: str + on_any_msg_color: + description: + - Output color to be used for O(on_any_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: on_any_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_ON_ANY_MSG_COLOR + vars: + - name: ansible_callback_diy_on_any_msg_color + type: str - runner_on_failed_msg: - description: Output to be used for callback runner_on_failed. - ini: - - section: callback_diy - key: runner_on_failed_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_FAILED_MSG - vars: - - name: ansible_callback_diy_runner_on_failed_msg - type: str + runner_on_failed_msg: + description: Output to be used for callback runner_on_failed. + ini: + - section: callback_diy + key: runner_on_failed_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_FAILED_MSG + vars: + - name: ansible_callback_diy_runner_on_failed_msg + type: str - runner_on_failed_msg_color: - description: - - Output color to be used for I(runner_on_failed_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_on_failed_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_FAILED_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_on_failed_msg_color - type: str + runner_on_failed_msg_color: + description: + - Output color to be used for O(runner_on_failed_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_on_failed_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_FAILED_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_on_failed_msg_color + type: str - runner_on_ok_msg: - description: Output to be used for callback runner_on_ok. 
- ini: - - section: callback_diy - key: runner_on_ok_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_OK_MSG - vars: - - name: ansible_callback_diy_runner_on_ok_msg - type: str + runner_on_ok_msg: + description: Output to be used for callback runner_on_ok. + ini: + - section: callback_diy + key: runner_on_ok_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_OK_MSG + vars: + - name: ansible_callback_diy_runner_on_ok_msg + type: str - runner_on_ok_msg_color: - description: - - Output color to be used for I(runner_on_ok_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_on_ok_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_OK_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_on_ok_msg_color - type: str + runner_on_ok_msg_color: + description: + - Output color to be used for O(runner_on_ok_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_on_ok_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_OK_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_on_ok_msg_color + type: str - runner_on_skipped_msg: - description: Output to be used for callback runner_on_skipped. - ini: - - section: callback_diy - key: runner_on_skipped_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_SKIPPED_MSG - vars: - - name: ansible_callback_diy_runner_on_skipped_msg - type: str + runner_on_skipped_msg: + description: Output to be used for callback runner_on_skipped. + ini: + - section: callback_diy + key: runner_on_skipped_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_SKIPPED_MSG + vars: + - name: ansible_callback_diy_runner_on_skipped_msg + type: str - runner_on_skipped_msg_color: - description: - - Output color to be used for I(runner_on_skipped_msg). - - Template should render a L(valid color value,#notes). 
- ini: - - section: callback_diy - key: runner_on_skipped_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_SKIPPED_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_on_skipped_msg_color - type: str + runner_on_skipped_msg_color: + description: + - Output color to be used for O(runner_on_skipped_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_on_skipped_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_SKIPPED_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_on_skipped_msg_color + type: str - runner_on_unreachable_msg: - description: Output to be used for callback runner_on_unreachable. - ini: - - section: callback_diy - key: runner_on_unreachable_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_UNREACHABLE_MSG - vars: - - name: ansible_callback_diy_runner_on_unreachable_msg - type: str + runner_on_unreachable_msg: + description: Output to be used for callback runner_on_unreachable. + ini: + - section: callback_diy + key: runner_on_unreachable_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_UNREACHABLE_MSG + vars: + - name: ansible_callback_diy_runner_on_unreachable_msg + type: str - runner_on_unreachable_msg_color: - description: - - Output color to be used for I(runner_on_unreachable_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_on_unreachable_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_UNREACHABLE_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_on_unreachable_msg_color - type: str + runner_on_unreachable_msg_color: + description: + - Output color to be used for O(runner_on_unreachable_msg). + - Template should render a L(valid color value,#notes). 
+ ini: + - section: callback_diy + key: runner_on_unreachable_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_UNREACHABLE_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_on_unreachable_msg_color + type: str - playbook_on_start_msg: - description: Output to be used for callback playbook_on_start. - ini: - - section: callback_diy - key: playbook_on_start_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG - vars: - - name: ansible_callback_diy_playbook_on_start_msg - type: str + playbook_on_start_msg: + description: Output to be used for callback playbook_on_start. + ini: + - section: callback_diy + key: playbook_on_start_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG + vars: + - name: ansible_callback_diy_playbook_on_start_msg + type: str - playbook_on_start_msg_color: - description: - - Output color to be used for I(playbook_on_start_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_start_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_start_msg_color - type: str + playbook_on_start_msg_color: + description: + - Output color to be used for O(playbook_on_start_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_start_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_START_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_start_msg_color + type: str - playbook_on_notify_msg: - description: Output to be used for callback playbook_on_notify. - ini: - - section: callback_diy - key: playbook_on_notify_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NOTIFY_MSG - vars: - - name: ansible_callback_diy_playbook_on_notify_msg - type: str + playbook_on_notify_msg: + description: Output to be used for callback playbook_on_notify. 
+ ini: + - section: callback_diy + key: playbook_on_notify_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NOTIFY_MSG + vars: + - name: ansible_callback_diy_playbook_on_notify_msg + type: str - playbook_on_notify_msg_color: - description: - - Output color to be used for I(playbook_on_notify_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_notify_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NOTIFY_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_notify_msg_color - type: str + playbook_on_notify_msg_color: + description: + - Output color to be used for O(playbook_on_notify_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_notify_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NOTIFY_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_notify_msg_color + type: str - playbook_on_no_hosts_matched_msg: - description: Output to be used for callback playbook_on_no_hosts_matched. - ini: - - section: callback_diy - key: playbook_on_no_hosts_matched_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_MATCHED_MSG - vars: - - name: ansible_callback_diy_playbook_on_no_hosts_matched_msg - type: str + playbook_on_no_hosts_matched_msg: + description: Output to be used for callback playbook_on_no_hosts_matched. + ini: + - section: callback_diy + key: playbook_on_no_hosts_matched_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_MATCHED_MSG + vars: + - name: ansible_callback_diy_playbook_on_no_hosts_matched_msg + type: str - playbook_on_no_hosts_matched_msg_color: - description: - - Output color to be used for I(playbook_on_no_hosts_matched_msg). - - Template should render a L(valid color value,#notes). 
- ini: - - section: callback_diy - key: playbook_on_no_hosts_matched_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_MATCHED_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_no_hosts_matched_msg_color - type: str + playbook_on_no_hosts_matched_msg_color: + description: + - Output color to be used for O(playbook_on_no_hosts_matched_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_no_hosts_matched_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_MATCHED_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_no_hosts_matched_msg_color + type: str - playbook_on_no_hosts_remaining_msg: - description: Output to be used for callback playbook_on_no_hosts_remaining. - ini: - - section: callback_diy - key: playbook_on_no_hosts_remaining_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_REMAINING_MSG - vars: - - name: ansible_callback_diy_playbook_on_no_hosts_remaining_msg - type: str + playbook_on_no_hosts_remaining_msg: + description: Output to be used for callback playbook_on_no_hosts_remaining. + ini: + - section: callback_diy + key: playbook_on_no_hosts_remaining_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_REMAINING_MSG + vars: + - name: ansible_callback_diy_playbook_on_no_hosts_remaining_msg + type: str - playbook_on_no_hosts_remaining_msg_color: - description: - - Output color to be used for I(playbook_on_no_hosts_remaining_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_no_hosts_remaining_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_REMAINING_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_no_hosts_remaining_msg_color - type: str + playbook_on_no_hosts_remaining_msg_color: + description: + - Output color to be used for O(playbook_on_no_hosts_remaining_msg). 
+ - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_no_hosts_remaining_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_NO_HOSTS_REMAINING_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_no_hosts_remaining_msg_color + type: str - playbook_on_task_start_msg: - description: Output to be used for callback playbook_on_task_start. - ini: - - section: callback_diy - key: playbook_on_task_start_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_TASK_START_MSG - vars: - - name: ansible_callback_diy_playbook_on_task_start_msg - type: str + playbook_on_task_start_msg: + description: Output to be used for callback playbook_on_task_start. + ini: + - section: callback_diy + key: playbook_on_task_start_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_TASK_START_MSG + vars: + - name: ansible_callback_diy_playbook_on_task_start_msg + type: str - playbook_on_task_start_msg_color: - description: - - Output color to be used for I(playbook_on_task_start_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_task_start_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_TASK_START_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_task_start_msg_color - type: str + playbook_on_task_start_msg_color: + description: + - Output color to be used for O(playbook_on_task_start_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_task_start_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_TASK_START_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_task_start_msg_color + type: str - playbook_on_handler_task_start_msg: - description: Output to be used for callback playbook_on_handler_task_start. 
- ini: - - section: callback_diy - key: playbook_on_handler_task_start_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_HANDLER_TASK_START_MSG - vars: - - name: ansible_callback_diy_playbook_on_handler_task_start_msg - type: str + playbook_on_handler_task_start_msg: + description: Output to be used for callback playbook_on_handler_task_start. + ini: + - section: callback_diy + key: playbook_on_handler_task_start_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_HANDLER_TASK_START_MSG + vars: + - name: ansible_callback_diy_playbook_on_handler_task_start_msg + type: str - playbook_on_handler_task_start_msg_color: - description: - - Output color to be used for I(playbook_on_handler_task_start_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_handler_task_start_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_HANDLER_TASK_START_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_handler_task_start_msg_color - type: str + playbook_on_handler_task_start_msg_color: + description: + - Output color to be used for O(playbook_on_handler_task_start_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_handler_task_start_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_HANDLER_TASK_START_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_handler_task_start_msg_color + type: str - playbook_on_vars_prompt_msg: - description: Output to be used for callback playbook_on_vars_prompt. - ini: - - section: callback_diy - key: playbook_on_vars_prompt_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_VARS_PROMPT_MSG - vars: - - name: ansible_callback_diy_playbook_on_vars_prompt_msg - type: str + playbook_on_vars_prompt_msg: + description: Output to be used for callback playbook_on_vars_prompt. 
+ ini: + - section: callback_diy + key: playbook_on_vars_prompt_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_VARS_PROMPT_MSG + vars: + - name: ansible_callback_diy_playbook_on_vars_prompt_msg + type: str - playbook_on_vars_prompt_msg_color: - description: - - Output color to be used for I(playbook_on_vars_prompt_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_vars_prompt_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_VARS_PROMPT_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_vars_prompt_msg_color - type: str + playbook_on_vars_prompt_msg_color: + description: + - Output color to be used for O(playbook_on_vars_prompt_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_vars_prompt_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_VARS_PROMPT_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_vars_prompt_msg_color + type: str - playbook_on_play_start_msg: - description: Output to be used for callback playbook_on_play_start. - ini: - - section: callback_diy - key: playbook_on_play_start_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_PLAY_START_MSG - vars: - - name: ansible_callback_diy_playbook_on_play_start_msg - type: str + playbook_on_play_start_msg: + description: Output to be used for callback playbook_on_play_start. + ini: + - section: callback_diy + key: playbook_on_play_start_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_PLAY_START_MSG + vars: + - name: ansible_callback_diy_playbook_on_play_start_msg + type: str - playbook_on_play_start_msg_color: - description: - - Output color to be used for I(playbook_on_play_start_msg). - - Template should render a L(valid color value,#notes). 
- ini: - - section: callback_diy - key: playbook_on_play_start_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_PLAY_START_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_play_start_msg_color - type: str + playbook_on_play_start_msg_color: + description: + - Output color to be used for O(playbook_on_play_start_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_play_start_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_PLAY_START_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_play_start_msg_color + type: str - playbook_on_stats_msg: - description: Output to be used for callback playbook_on_stats. - ini: - - section: callback_diy - key: playbook_on_stats_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_STATS_MSG - vars: - - name: ansible_callback_diy_playbook_on_stats_msg - type: str + playbook_on_stats_msg: + description: Output to be used for callback playbook_on_stats. + ini: + - section: callback_diy + key: playbook_on_stats_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_STATS_MSG + vars: + - name: ansible_callback_diy_playbook_on_stats_msg + type: str - playbook_on_stats_msg_color: - description: - - Output color to be used for I(playbook_on_stats_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_stats_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_STATS_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_stats_msg_color - type: str + playbook_on_stats_msg_color: + description: + - Output color to be used for O(playbook_on_stats_msg). + - Template should render a L(valid color value,#notes). 
+ ini: + - section: callback_diy + key: playbook_on_stats_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_STATS_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_stats_msg_color + type: str - on_file_diff_msg: - description: Output to be used for callback on_file_diff. - ini: - - section: callback_diy - key: on_file_diff_msg - env: - - name: ANSIBLE_CALLBACK_DIY_ON_FILE_DIFF_MSG - vars: - - name: ansible_callback_diy_on_file_diff_msg - type: str + on_file_diff_msg: + description: Output to be used for callback on_file_diff. + ini: + - section: callback_diy + key: on_file_diff_msg + env: + - name: ANSIBLE_CALLBACK_DIY_ON_FILE_DIFF_MSG + vars: + - name: ansible_callback_diy_on_file_diff_msg + type: str - on_file_diff_msg_color: - description: - - Output color to be used for I(on_file_diff_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: on_file_diff_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_ON_FILE_DIFF_MSG_COLOR - vars: - - name: ansible_callback_diy_on_file_diff_msg_color - type: str + on_file_diff_msg_color: + description: + - Output color to be used for O(on_file_diff_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: on_file_diff_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_ON_FILE_DIFF_MSG_COLOR + vars: + - name: ansible_callback_diy_on_file_diff_msg_color + type: str - playbook_on_include_msg: - description: Output to be used for callback playbook_on_include. - ini: - - section: callback_diy - key: playbook_on_include_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_INCLUDE_MSG - vars: - - name: ansible_callback_diy_playbook_on_include_msg - type: str + playbook_on_include_msg: + description: Output to be used for callback playbook_on_include. 
+ ini: + - section: callback_diy + key: playbook_on_include_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_INCLUDE_MSG + vars: + - name: ansible_callback_diy_playbook_on_include_msg + type: str - playbook_on_include_msg_color: - description: - - Output color to be used for I(playbook_on_include_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_include_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_INCLUDE_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_include_msg_color - type: str + playbook_on_include_msg_color: + description: + - Output color to be used for O(playbook_on_include_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_include_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_INCLUDE_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_include_msg_color + type: str - runner_item_on_ok_msg: - description: Output to be used for callback runner_item_on_ok. - ini: - - section: callback_diy - key: runner_item_on_ok_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_OK_MSG - vars: - - name: ansible_callback_diy_runner_item_on_ok_msg - type: str + runner_item_on_ok_msg: + description: Output to be used for callback runner_item_on_ok. + ini: + - section: callback_diy + key: runner_item_on_ok_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_OK_MSG + vars: + - name: ansible_callback_diy_runner_item_on_ok_msg + type: str - runner_item_on_ok_msg_color: - description: - - Output color to be used for I(runner_item_on_ok_msg). - - Template should render a L(valid color value,#notes). 
- ini: - - section: callback_diy - key: runner_item_on_ok_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_OK_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_item_on_ok_msg_color - type: str + runner_item_on_ok_msg_color: + description: + - Output color to be used for O(runner_item_on_ok_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_item_on_ok_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_OK_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_item_on_ok_msg_color + type: str - runner_item_on_failed_msg: - description: Output to be used for callback runner_item_on_failed. - ini: - - section: callback_diy - key: runner_item_on_failed_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_FAILED_MSG - vars: - - name: ansible_callback_diy_runner_item_on_failed_msg - type: str + runner_item_on_failed_msg: + description: Output to be used for callback runner_item_on_failed. + ini: + - section: callback_diy + key: runner_item_on_failed_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_FAILED_MSG + vars: + - name: ansible_callback_diy_runner_item_on_failed_msg + type: str - runner_item_on_failed_msg_color: - description: - - Output color to be used for I(runner_item_on_failed_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_item_on_failed_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_FAILED_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_item_on_failed_msg_color - type: str + runner_item_on_failed_msg_color: + description: + - Output color to be used for O(runner_item_on_failed_msg). + - Template should render a L(valid color value,#notes). 
+ ini: + - section: callback_diy + key: runner_item_on_failed_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_FAILED_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_item_on_failed_msg_color + type: str - runner_item_on_skipped_msg: - description: Output to be used for callback runner_item_on_skipped. - ini: - - section: callback_diy - key: runner_item_on_skipped_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_SKIPPED_MSG - vars: - - name: ansible_callback_diy_runner_item_on_skipped_msg - type: str + runner_item_on_skipped_msg: + description: Output to be used for callback runner_item_on_skipped. + ini: + - section: callback_diy + key: runner_item_on_skipped_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_SKIPPED_MSG + vars: + - name: ansible_callback_diy_runner_item_on_skipped_msg + type: str - runner_item_on_skipped_msg_color: - description: - - Output color to be used for I(runner_item_on_skipped_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_item_on_skipped_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_SKIPPED_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_item_on_skipped_msg_color - type: str + runner_item_on_skipped_msg_color: + description: + - Output color to be used for O(runner_item_on_skipped_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_item_on_skipped_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ITEM_ON_SKIPPED_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_item_on_skipped_msg_color + type: str - runner_retry_msg: - description: Output to be used for callback runner_retry. - ini: - - section: callback_diy - key: runner_retry_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_RETRY_MSG - vars: - - name: ansible_callback_diy_runner_retry_msg - type: str + runner_retry_msg: + description: Output to be used for callback runner_retry. 
+ ini: + - section: callback_diy + key: runner_retry_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_RETRY_MSG + vars: + - name: ansible_callback_diy_runner_retry_msg + type: str - runner_retry_msg_color: - description: - - Output color to be used for I(runner_retry_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_retry_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_RETRY_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_retry_msg_color - type: str + runner_retry_msg_color: + description: + - Output color to be used for O(runner_retry_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_retry_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_RETRY_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_retry_msg_color + type: str - runner_on_start_msg: - description: Output to be used for callback runner_on_start. - ini: - - section: callback_diy - key: runner_on_start_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_START_MSG - vars: - - name: ansible_callback_diy_runner_on_start_msg - type: str + runner_on_start_msg: + description: Output to be used for callback runner_on_start. + ini: + - section: callback_diy + key: runner_on_start_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_START_MSG + vars: + - name: ansible_callback_diy_runner_on_start_msg + type: str - runner_on_start_msg_color: - description: - - Output color to be used for I(runner_on_start_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_on_start_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_START_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_on_start_msg_color - type: str + runner_on_start_msg_color: + description: + - Output color to be used for O(runner_on_start_msg). + - Template should render a L(valid color value,#notes). 
+ ini: + - section: callback_diy + key: runner_on_start_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_START_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_on_start_msg_color + type: str - runner_on_no_hosts_msg: - description: Output to be used for callback runner_on_no_hosts. - ini: - - section: callback_diy - key: runner_on_no_hosts_msg - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_NO_HOSTS_MSG - vars: - - name: ansible_callback_diy_runner_on_no_hosts_msg - type: str + runner_on_no_hosts_msg: + description: Output to be used for callback runner_on_no_hosts. + ini: + - section: callback_diy + key: runner_on_no_hosts_msg + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_NO_HOSTS_MSG + vars: + - name: ansible_callback_diy_runner_on_no_hosts_msg + type: str - runner_on_no_hosts_msg_color: - description: - - Output color to be used for I(runner_on_no_hosts_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: runner_on_no_hosts_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_NO_HOSTS_MSG_COLOR - vars: - - name: ansible_callback_diy_runner_on_no_hosts_msg_color - type: str + runner_on_no_hosts_msg_color: + description: + - Output color to be used for O(runner_on_no_hosts_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: runner_on_no_hosts_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_RUNNER_ON_NO_HOSTS_MSG_COLOR + vars: + - name: ansible_callback_diy_runner_on_no_hosts_msg_color + type: str - playbook_on_setup_msg: - description: Output to be used for callback playbook_on_setup. - ini: - - section: callback_diy - key: playbook_on_setup_msg - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_SETUP_MSG - vars: - - name: ansible_callback_diy_playbook_on_setup_msg - type: str + playbook_on_setup_msg: + description: Output to be used for callback playbook_on_setup. 
+ ini: + - section: callback_diy + key: playbook_on_setup_msg + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_SETUP_MSG + vars: + - name: ansible_callback_diy_playbook_on_setup_msg + type: str - playbook_on_setup_msg_color: - description: - - Output color to be used for I(playbook_on_setup_msg). - - Template should render a L(valid color value,#notes). - ini: - - section: callback_diy - key: playbook_on_setup_msg_color - env: - - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_SETUP_MSG_COLOR - vars: - - name: ansible_callback_diy_playbook_on_setup_msg_color - type: str -''' + playbook_on_setup_msg_color: + description: + - Output color to be used for O(playbook_on_setup_msg). + - Template should render a L(valid color value,#notes). + ini: + - section: callback_diy + key: playbook_on_setup_msg_color + env: + - name: ANSIBLE_CALLBACK_DIY_PLAYBOOK_ON_SETUP_MSG_COLOR + vars: + - name: ansible_callback_diy_playbook_on_setup_msg_color + type: str +""" -EXAMPLES = r''' +EXAMPLES = r""" ansible.cfg: > # Enable plugin [defaults] @@ -623,7 +616,7 @@ ansible.cfg: > # Newline after every callback # on_any_msg='{{ " " | join("\n") }}' -playbook.yml: > +playbook.yml: >- --- - name: "Default plugin output: play example" hosts: localhost @@ -782,19 +775,21 @@ playbook.yml: > {{ white }}{{ ansible_callback_diy[key] }} {% endfor %} -''' +""" import sys from contextlib import contextmanager -from ansible import constants as C -from ansible.playbook.task_include import TaskInclude -from ansible.plugins.callback import CallbackBase -from ansible.utils.color import colorize, hostcolor from ansible.template import Templar from ansible.vars.manager import VariableManager from ansible.plugins.callback.default import CallbackModule as Default from ansible.module_utils.common.text.converters import to_text +try: + from ansible.template import trust_as_template # noqa: F401, pylint: disable=unused-import + SUPPORTS_DATA_TAGGING = True +except ImportError: + SUPPORTS_DATA_TAGGING = False + class 
DummyStdout(object): def flush(self): @@ -832,9 +827,9 @@ class CallbackModule(Default): _callback_options = ['msg', 'msg_color'] for option in _callback_options: - _option_name = '%s_%s' % (_callback_type, option) + _option_name = f'{_callback_type}_{option}' _option_template = variables.get( - self.DIY_NS + "_" + _option_name, + f"{self.DIY_NS}_{_option_name}", self.get_option(_option_name) ) _ret.update({option: self._template( @@ -848,7 +843,10 @@ class CallbackModule(Default): return _ret def _using_diy(self, spec): - return (spec['msg'] is not None) and (spec['msg'] != spec['vars']['omit']) + sentinel = object() + omit = spec['vars'].get('omit', sentinel) + # With Data Tagging, omit is sentinel + return (spec['msg'] is not None) and (spec['msg'] != omit or omit is sentinel) def _parent_has_callback(self): return hasattr(super(CallbackModule, self), sys._getframe(1).f_code.co_name) @@ -871,7 +869,7 @@ class CallbackModule(Default): handler=None, result=None, stats=None, remove_attr_ref_loop=True): def _get_value(obj, attr=None, method=None): if attr: - return getattr(obj, attr, getattr(obj, "_" + attr, None)) + return getattr(obj, attr, getattr(obj, f"_{attr}", None)) if method: _method = getattr(obj, method) @@ -904,7 +902,7 @@ class CallbackModule(Default): ) _ret.update(_all) - _ret.update(_ret.get(self.DIY_NS, {self.DIY_NS: CallbackDIYDict()})) + _ret.update(_ret.get(self.DIY_NS, {self.DIY_NS: {} if SUPPORTS_DATA_TAGGING else CallbackDIYDict()})) _ret[self.DIY_NS].update({'playbook': {}}) _playbook_attributes = ['entries', 'file_name', 'basedir'] diff --git a/plugins/callback/elastic.py b/plugins/callback/elastic.py index 37526c155d..82478b9e7d 100644 --- a/plugins/callback/elastic.py +++ b/plugins/callback/elastic.py @@ -2,72 +2,71 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, 
print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Victor Martinez (@v1v) - name: elastic - type: notification - short_description: Create distributed traces for each Ansible task in Elastic APM - version_added: 3.8.0 +DOCUMENTATION = r""" +author: Victor Martinez (@v1v) +name: elastic +type: notification +short_description: Create distributed traces for each Ansible task in Elastic APM +version_added: 3.8.0 +description: + - This callback creates distributed traces for each Ansible task in Elastic APM. + - You can configure the plugin with environment variables. + - See U(https://www.elastic.co/guide/en/apm/agent/python/current/configuration.html). +options: + hide_task_arguments: + default: false + type: bool description: - - This callback creates distributed traces for each Ansible task in Elastic APM. - - You can configure the plugin with environment variables. - - See U(https://www.elastic.co/guide/en/apm/agent/python/current/configuration.html). - options: - hide_task_arguments: - default: false - type: bool - description: - - Hide the arguments for a task. - env: - - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS - apm_service_name: - default: ansible - type: str - description: - - The service name resource attribute. - env: - - name: ELASTIC_APM_SERVICE_NAME - apm_server_url: - type: str - description: - - Use the APM server and its environment variables. - env: - - name: ELASTIC_APM_SERVER_URL - apm_secret_token: - type: str - description: - - Use the APM server token - env: - - name: ELASTIC_APM_SECRET_TOKEN - apm_api_key: - type: str - description: - - Use the APM API key - env: - - name: ELASTIC_APM_API_KEY - apm_verify_server_cert: - default: true - type: bool - description: - - Verifies the SSL certificate if an HTTPS connection. 
- env: - - name: ELASTIC_APM_VERIFY_SERVER_CERT - traceparent: - type: str - description: - - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header). - env: - - name: TRACEPARENT - requirements: - - elastic-apm (Python library) -''' + - Hide the arguments for a task. + env: + - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS + apm_service_name: + default: ansible + type: str + description: + - The service name resource attribute. + env: + - name: ELASTIC_APM_SERVICE_NAME + apm_server_url: + type: str + description: + - Use the APM server and its environment variables. + env: + - name: ELASTIC_APM_SERVER_URL + apm_secret_token: + type: str + description: + - Use the APM server token. + env: + - name: ELASTIC_APM_SECRET_TOKEN + apm_api_key: + type: str + description: + - Use the APM API key. + env: + - name: ELASTIC_APM_API_KEY + apm_verify_server_cert: + default: true + type: bool + description: + - Verifies the SSL certificate if an HTTPS connection. + env: + - name: ELASTIC_APM_VERIFY_SERVER_CERT + traceparent: + type: str + description: + - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header). 
+ env: + - name: TRACEPARENT +requirements: + - elastic-apm (Python library) +""" -EXAMPLES = ''' -examples: | +EXAMPLES = r""" +examples: |- Enable the plugin in ansible.cfg: [defaults] callbacks_enabled = community.general.elastic @@ -76,7 +75,7 @@ examples: | export ELASTIC_APM_SERVER_URL= export ELASTIC_APM_SERVICE_NAME=your_service_name export ELASTIC_APM_API_KEY=your_APM_API_KEY -''' +""" import getpass import socket @@ -84,10 +83,11 @@ import time import uuid from collections import OrderedDict +from contextlib import closing from os.path import basename from ansible.errors import AnsibleError, AnsibleRuntimeError -from ansible.module_utils.six import raise_from +from ansible.module_utils.ansible_release import __version__ as ansible_version from ansible.plugins.callback import CallbackBase try: @@ -117,7 +117,7 @@ class TaskData: if host.uuid in self.host_data: if host.status == 'included': # concatenate task include output from multiple items - host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result) + host.result = f'{self.host_data[host.uuid].result}\n{host.result}' else: return @@ -140,7 +140,6 @@ class HostData: class ElasticSource(object): def __init__(self, display): self.ansible_playbook = "" - self.ansible_version = None self.session = str(uuid.uuid4()) self.host = socket.gethostname() try: @@ -165,7 +164,7 @@ class ElasticSource(object): args = None if not task.no_log and not hide_task_arguments: - args = ', '.join(('%s=%s' % a for a in task.args.items())) + args = ', '.join((f'{k}={v}' for k, v in task.args.items())) tasks_data[uuid] = TaskData(uuid, name, path, play_name, action, args) @@ -183,9 +182,6 @@ class ElasticSource(object): task = tasks_data[task_uuid] - if self.ansible_version is None and result._task_fields['args'].get('_ansible_version'): - self.ansible_version = result._task_fields['args'].get('_ansible_version') - task.add_host(HostData(host_uuid, host_name, status, result)) def generate_distributed_traces(self, 
tasks_data, status, end_time, traceparent, apm_service_name, @@ -201,29 +197,29 @@ class ElasticSource(object): apm_cli = self.init_apm_client(apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key) if apm_cli: - instrument() # Only call this once, as early as possible. - if traceparent: - parent = trace_parent_from_string(traceparent) - apm_cli.begin_transaction("Session", trace_parent=parent, start=parent_start_time) - else: - apm_cli.begin_transaction("Session", start=parent_start_time) - # Populate trace metadata attributes - if self.ansible_version is not None: - label(ansible_version=self.ansible_version) - label(ansible_session=self.session, ansible_host_name=self.host, ansible_host_user=self.user) - if self.ip_address is not None: - label(ansible_host_ip=self.ip_address) + with closing(apm_cli): + instrument() # Only call this once, as early as possible. + if traceparent: + parent = trace_parent_from_string(traceparent) + apm_cli.begin_transaction("Session", trace_parent=parent, start=parent_start_time) + else: + apm_cli.begin_transaction("Session", start=parent_start_time) + # Populate trace metadata attributes + label(ansible_version=ansible_version) + label(ansible_session=self.session, ansible_host_name=self.host, ansible_host_user=self.user) + if self.ip_address is not None: + label(ansible_host_ip=self.ip_address) - for task_data in tasks: - for host_uuid, host_data in task_data.host_data.items(): - self.create_span_data(apm_cli, task_data, host_data) + for task_data in tasks: + for host_uuid, host_data in task_data.host_data.items(): + self.create_span_data(apm_cli, task_data, host_data) - apm_cli.end_transaction(name=__name__, result=status, duration=end_time - parent_start_time) + apm_cli.end_transaction(name=__name__, result=status, duration=end_time - parent_start_time) def create_span_data(self, apm_cli, task_data, host_data): """ create the span with the given TaskData and HostData """ - name = '[%s] %s: %s' % 
(host_data.name, task_data.play, task_data.name) + name = f'[{host_data.name}] {task_data.play}: {task_data.name}' message = "success" status = "success" @@ -257,7 +253,7 @@ class ElasticSource(object): "ansible.task.host.status": host_data.status}) as span: span.outcome = status if 'failure' in status: - exception = AnsibleRuntimeError(message="{0}: {1} failed with error message {2}".format(task_data.action, name, enriched_error_message)) + exception = AnsibleRuntimeError(message=f"{task_data.action}: {name} failed with error message {enriched_error_message}") apm_cli.capture_exception(exc_info=(type(exception), exception, exception.__traceback__), handled=True) def init_apm_client(self, apm_server_url, apm_service_name, apm_verify_server_cert, apm_secret_token, apm_api_key): @@ -286,7 +282,7 @@ class ElasticSource(object): message = result.get('msg', 'failed') exception = result.get('exception') stderr = result.get('stderr') - return ('message: "{0}"\nexception: "{1}"\nstderr: "{2}"').format(message, exception, stderr) + return f"message: \"{message}\"\nexception: \"{exception}\"\nstderr: \"{stderr}\"" class CallbackModule(CallbackBase): @@ -311,9 +307,7 @@ class CallbackModule(CallbackBase): self.disabled = False if ELASTIC_LIBRARY_IMPORT_ERROR: - raise_from( - AnsibleError('The `elastic-apm` must be installed to use this plugin'), - ELASTIC_LIBRARY_IMPORT_ERROR) + raise AnsibleError('The `elastic-apm` must be installed to use this plugin') from ELASTIC_LIBRARY_IMPORT_ERROR self.tasks_data = OrderedDict() diff --git a/plugins/callback/hipchat.py b/plugins/callback/hipchat.py deleted file mode 100644 index 3e10b69e7f..0000000000 --- a/plugins/callback/hipchat.py +++ /dev/null @@ -1,229 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2014, Matt Martz -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from 
__future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: hipchat - type: notification - requirements: - - whitelist in configuration. - - prettytable (python lib) - short_description: post task events to hipchat - description: - - This callback plugin sends status updates to a HipChat channel during playbook execution. - - Before 2.4 only environment variables were available for configuring this plugin. - options: - token: - description: HipChat API token for v1 or v2 API. - required: true - env: - - name: HIPCHAT_TOKEN - ini: - - section: callback_hipchat - key: token - api_version: - description: HipChat API version, v1 or v2. - required: false - default: v1 - env: - - name: HIPCHAT_API_VERSION - ini: - - section: callback_hipchat - key: api_version - room: - description: HipChat room to post in. - default: ansible - env: - - name: HIPCHAT_ROOM - ini: - - section: callback_hipchat - key: room - from: - description: Name to post as - default: ansible - env: - - name: HIPCHAT_FROM - ini: - - section: callback_hipchat - key: from - notify: - description: Add notify flag to important messages - type: bool - default: true - env: - - name: HIPCHAT_NOTIFY - ini: - - section: callback_hipchat - key: notify - -''' - -import os -import json - -try: - import prettytable - HAS_PRETTYTABLE = True -except ImportError: - HAS_PRETTYTABLE = False - -from ansible.plugins.callback import CallbackBase -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import open_url - - -class CallbackModule(CallbackBase): - """This is an example ansible callback plugin that sends status - updates to a HipChat channel during playbook execution. 
- """ - - CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'notification' - CALLBACK_NAME = 'community.general.hipchat' - CALLBACK_NEEDS_WHITELIST = True - - API_V1_URL = 'https://api.hipchat.com/v1/rooms/message' - API_V2_URL = 'https://api.hipchat.com/v2/' - - def __init__(self): - - super(CallbackModule, self).__init__() - - if not HAS_PRETTYTABLE: - self.disabled = True - self._display.warning('The `prettytable` python module is not installed. ' - 'Disabling the HipChat callback plugin.') - self.printed_playbook = False - self.playbook_name = None - self.play = None - - def set_options(self, task_keys=None, var_options=None, direct=None): - super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) - - self.token = self.get_option('token') - self.api_version = self.get_option('api_version') - self.from_name = self.get_option('from') - self.allow_notify = self.get_option('notify') - self.room = self.get_option('room') - - if self.token is None: - self.disabled = True - self._display.warning('HipChat token could not be loaded. The HipChat ' - 'token can be provided using the `HIPCHAT_TOKEN` ' - 'environment variable.') - - # Pick the request handler. 
- if self.api_version == 'v2': - self.send_msg = self.send_msg_v2 - else: - self.send_msg = self.send_msg_v1 - - def send_msg_v2(self, msg, msg_format='text', color='yellow', notify=False): - """Method for sending a message to HipChat""" - - headers = {'Authorization': 'Bearer %s' % self.token, 'Content-Type': 'application/json'} - - body = {} - body['room_id'] = self.room - body['from'] = self.from_name[:15] # max length is 15 - body['message'] = msg - body['message_format'] = msg_format - body['color'] = color - body['notify'] = self.allow_notify and notify - - data = json.dumps(body) - url = self.API_V2_URL + "room/{room_id}/notification".format(room_id=self.room) - try: - response = open_url(url, data=data, headers=headers, method='POST') - return response.read() - except Exception as ex: - self._display.warning('Could not submit message to hipchat: {0}'.format(ex)) - - def send_msg_v1(self, msg, msg_format='text', color='yellow', notify=False): - """Method for sending a message to HipChat""" - - params = {} - params['room_id'] = self.room - params['from'] = self.from_name[:15] # max length is 15 - params['message'] = msg - params['message_format'] = msg_format - params['color'] = color - params['notify'] = int(self.allow_notify and notify) - - url = ('%s?auth_token=%s' % (self.API_V1_URL, self.token)) - try: - response = open_url(url, data=urlencode(params)) - return response.read() - except Exception as ex: - self._display.warning('Could not submit message to hipchat: {0}'.format(ex)) - - def v2_playbook_on_play_start(self, play): - """Display Playbook and play start messages""" - - self.play = play - name = play.name - # This block sends information about a playbook when it starts - # The playbook object is not immediately available at - # playbook_on_start so we grab it via the play - # - # Displays info about playbook being started by a person on an - # inventory, as well as Tags, Skip Tags and Limits - if not self.printed_playbook: - self.playbook_name, 
dummy = os.path.splitext(os.path.basename(self.play.playbook.filename)) - host_list = self.play.playbook.inventory.host_list - inventory = os.path.basename(os.path.realpath(host_list)) - self.send_msg("%s: Playbook initiated by %s against %s" % - (self.playbook_name, - self.play.playbook.remote_user, - inventory), notify=True) - self.printed_playbook = True - subset = self.play.playbook.inventory._subset - skip_tags = self.play.playbook.skip_tags - self.send_msg("%s:\nTags: %s\nSkip Tags: %s\nLimit: %s" % - (self.playbook_name, - ', '.join(self.play.playbook.only_tags), - ', '.join(skip_tags) if skip_tags else None, - ', '.join(subset) if subset else subset)) - - # This is where we actually say we are starting a play - self.send_msg("%s: Starting play: %s" % - (self.playbook_name, name)) - - def playbook_on_stats(self, stats): - """Display info about playbook statistics""" - hosts = sorted(stats.processed.keys()) - - t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable', - 'Failures']) - - failures = False - unreachable = False - - for h in hosts: - s = stats.summarize(h) - - if s['failures'] > 0: - failures = True - if s['unreachable'] > 0: - unreachable = True - - t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable', - 'failures']]) - - self.send_msg("%s: Playbook complete" % self.playbook_name, - notify=True) - - if failures or unreachable: - color = 'red' - self.send_msg("%s: Failures detected" % self.playbook_name, - color=color, notify=True) - else: - color = 'green' - - self.send_msg("/code %s:\n%s" % (self.playbook_name, t), color=color) diff --git a/plugins/callback/jabber.py b/plugins/callback/jabber.py index d2d00496d8..319611d460 100644 --- a/plugins/callback/jabber.py +++ b/plugins/callback/jabber.py @@ -1,44 +1,46 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2016 maxn nikolaev.makc@gmail.com # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: jabber - type: notification - short_description: post task events to a jabber server - description: - - The chatty part of ChatOps with a Hipchat server as a target. - - This callback plugin sends status updates to a HipChat channel during playbook execution. - requirements: - - xmpp (Python library U(https://github.com/ArchipelProject/xmpppy)) - options: - server: - description: connection info to jabber server - required: true - env: - - name: JABBER_SERV - user: - description: Jabber user to authenticate as - required: true - env: - - name: JABBER_USER - password: - description: Password for the user to the jabber server - required: true - env: - - name: JABBER_PASS - to: - description: chat identifier that will receive the message - required: true - env: - - name: JABBER_TO -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: jabber +type: notification +short_description: Post task events to a Jabber server +description: + - The chatty part of ChatOps with a Hipchat server as a target. + - This callback plugin sends status updates to a HipChat channel during playbook execution. +requirements: + - xmpp (Python library U(https://github.com/ArchipelProject/xmpppy)) +options: + server: + description: Connection info to Jabber server. + type: str + required: true + env: + - name: JABBER_SERV + user: + description: Jabber user to authenticate as. + type: str + required: true + env: + - name: JABBER_USER + password: + description: Password for the user to the Jabber server. + type: str + required: true + env: + - name: JABBER_PASS + to: + description: Chat identifier that receives the message. 
+ type: str + required: true + env: + - name: JABBER_TO +""" import os @@ -98,7 +100,7 @@ class CallbackModule(CallbackBase): """Display Playbook and play start messages""" self.play = play name = play.name - self.send_msg("Ansible starting play: %s" % (name)) + self.send_msg(f"Ansible starting play: {name}") def playbook_on_stats(self, stats): name = self.play @@ -114,7 +116,7 @@ class CallbackModule(CallbackBase): if failures or unreachable: out = self.debug - self.send_msg("%s: Failures detected \n%s \nHost: %s\n Failed at:\n%s" % (name, self.task, h, out)) + self.send_msg(f"{name}: Failures detected \n{self.task} \nHost: {h}\n Failed at:\n{out}") else: out = self.debug - self.send_msg("Great! \n Playbook %s completed:\n%s \n Last task debug:\n %s" % (name, s, out)) + self.send_msg(f"Great! \n Playbook {name} completed:\n{s} \n Last task debug:\n {out}") diff --git a/plugins/callback/log_plays.py b/plugins/callback/log_plays.py index e99054e176..89ec8cbff3 100644 --- a/plugins/callback/log_plays.py +++ b/plugins/callback/log_plays.py @@ -1,32 +1,31 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2012, Michael DeHaan, # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: log_plays - type: notification - short_description: write playbook output to log file - description: - - This callback writes playbook output to a file per host in the C(/var/log/ansible/hosts) directory. - requirements: - - Whitelist in configuration - - A writeable C(/var/log/ansible/hosts) directory by the user executing Ansible on the controller - options: - log_folder: - default: /var/log/ansible/hosts - description: The folder where log files will be created. 
- env: - - name: ANSIBLE_LOG_FOLDER - ini: - - section: callback_log_plays - key: log_folder -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: log_plays +type: notification +short_description: Write playbook output to log file +description: + - This callback writes playbook output to a file per host in the C(/var/log/ansible/hosts) directory. +requirements: + - Whitelist in configuration + - A writeable C(/var/log/ansible/hosts) directory by the user executing Ansible on the controller +options: + log_folder: + default: /var/log/ansible/hosts + description: The folder where log files are created. + type: str + env: + - name: ANSIBLE_LOG_FOLDER + ini: + - section: callback_log_plays + key: log_folder +""" import os import time @@ -34,7 +33,7 @@ import json from ansible.utils.path import makedirs_safe from ansible.module_utils.common.text.converters import to_bytes -from ansible.module_utils.common._collections_compat import MutableMapping +from collections.abc import MutableMapping from ansible.parsing.ajson import AnsibleJSONEncoder from ansible.plugins.callback import CallbackBase @@ -56,7 +55,10 @@ class CallbackModule(CallbackBase): CALLBACK_NEEDS_WHITELIST = True TIME_FORMAT = "%b %d %Y %H:%M:%S" - MSG_FORMAT = "%(now)s - %(playbook)s - %(task_name)s - %(task_action)s - %(category)s - %(data)s\n\n" + + @staticmethod + def _make_msg(now, playbook, task_name, task_action, category, data): + return f"{now} - {playbook} - {task_name} - {task_action} - {category} - {data}\n\n" def __init__(self): @@ -81,22 +83,12 @@ class CallbackModule(CallbackBase): invocation = data.pop('invocation', None) data = json.dumps(data, cls=AnsibleJSONEncoder) if invocation is not None: - data = json.dumps(invocation) + " => %s " % data + data = f"{json.dumps(invocation)} => {data} " path = os.path.join(self.log_folder, result._host.get_name()) now = time.strftime(self.TIME_FORMAT, time.localtime()) - msg = to_bytes( - self.MSG_FORMAT - % dict( - now=now, - 
playbook=self.playbook, - task_name=result._task.name, - task_action=result._task.action, - category=category, - data=data, - ) - ) + msg = to_bytes(self._make_msg(now, self.playbook, result._task.name, result._task.action, category, data)) with open(path, "ab") as fd: fd.write(msg) diff --git a/plugins/callback/loganalytics.py b/plugins/callback/loganalytics.py index 8690aac934..05996f2492 100644 --- a/plugins/callback/loganalytics.py +++ b/plugins/callback/loganalytics.py @@ -1,44 +1,44 @@ -# -*- coding: utf-8 -*- # Copyright (c) Ansible project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: loganalytics - type: notification - short_description: Posts task results to Azure Log Analytics - author: "Cyrus Li (@zhcli) " - description: - - This callback plugin will post task results in JSON formatted to an Azure Log Analytics workspace. - - Credits to authors of splunk callback plugin. - version_added: "2.4.0" - requirements: - - Whitelisting this callback plugin. - - An Azure log analytics work space has been established. - options: - workspace_id: - description: Workspace ID of the Azure log analytics workspace. - required: true - env: - - name: WORKSPACE_ID - ini: - - section: callback_loganalytics - key: workspace_id - shared_key: - description: Shared key to connect to Azure log analytics workspace. - required: true - env: - - name: WORKSPACE_SHARED_KEY - ini: - - section: callback_loganalytics - key: shared_key -''' +DOCUMENTATION = r""" +name: loganalytics +type: notification +short_description: Posts task results to Azure Log Analytics +author: "Cyrus Li (@zhcli) " +description: + - This callback plugin posts task results in JSON formatted to an Azure Log Analytics workspace. 
+ - Credits to authors of splunk callback plugin. +version_added: "2.4.0" +requirements: + - Whitelisting this callback plugin. + - An Azure log analytics work space has been established. +options: + workspace_id: + description: Workspace ID of the Azure log analytics workspace. + type: str + required: true + env: + - name: WORKSPACE_ID + ini: + - section: callback_loganalytics + key: workspace_id + shared_key: + description: Shared key to connect to Azure log analytics workspace. + type: str + required: true + env: + - name: WORKSPACE_SHARED_KEY + ini: + - section: callback_loganalytics + key: shared_key +""" -EXAMPLES = ''' -examples: | +EXAMPLES = r""" +examples: |- Whitelist the plugin in ansible.cfg: [defaults] callback_whitelist = community.general.loganalytics @@ -49,30 +49,32 @@ examples: | [callback_loganalytics] workspace_id = 01234567-0123-0123-0123-01234567890a shared_key = dZD0kCbKl3ehZG6LHFMuhtE0yHiFCmetzFMc2u+roXIUQuatqU924SsAAAAPemhjbGlAemhjbGktTUJQAQIDBA== -''' +""" import hashlib import hmac import base64 -import logging import json import uuid import socket import getpass -from datetime import datetime from os.path import basename +from ansible.module_utils.ansible_release import __version__ as ansible_version from ansible.module_utils.urls import open_url from ansible.parsing.ajson import AnsibleJSONEncoder from ansible.plugins.callback import CallbackBase +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + class AzureLogAnalyticsSource(object): def __init__(self): self.ansible_check_mode = False self.ansible_playbook = "" - self.ansible_version = "" self.session = str(uuid.uuid4()) self.host = socket.gethostname() self.user = getpass.getuser() @@ -80,30 +82,25 @@ class AzureLogAnalyticsSource(object): def __build_signature(self, date, workspace_id, shared_key, content_length): # Build authorisation signature for Azure log analytics API call - sigs = 
"POST\n{0}\napplication/json\nx-ms-date:{1}\n/api/logs".format( - str(content_length), date) + sigs = f"POST\n{content_length}\napplication/json\nx-ms-date:{date}\n/api/logs" utf8_sigs = sigs.encode('utf-8') decoded_shared_key = base64.b64decode(shared_key) hmac_sha256_sigs = hmac.new( decoded_shared_key, utf8_sigs, digestmod=hashlib.sha256).digest() encoded_hash = base64.b64encode(hmac_sha256_sigs).decode('utf-8') - signature = "SharedKey {0}:{1}".format(workspace_id, encoded_hash) + signature = f"SharedKey {workspace_id}:{encoded_hash}" return signature def __build_workspace_url(self, workspace_id): - return "https://{0}.ods.opinsights.azure.com/api/logs?api-version=2016-04-01".format(workspace_id) + return f"https://{workspace_id}.ods.opinsights.azure.com/api/logs?api-version=2016-04-01" def __rfc1123date(self): - return datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT') + return now().strftime('%a, %d %b %Y %H:%M:%S GMT') def send_event(self, workspace_id, shared_key, state, result, runtime): if result._task_fields['args'].get('_ansible_check_mode') is True: self.ansible_check_mode = True - if result._task_fields['args'].get('_ansible_version'): - self.ansible_version = \ - result._task_fields['args'].get('_ansible_version') - if result._task._role: ansible_role = str(result._task._role) else: @@ -117,7 +114,7 @@ class AzureLogAnalyticsSource(object): data['host'] = self.host data['user'] = self.user data['runtime'] = runtime - data['ansible_version'] = self.ansible_version + data['ansible_version'] = ansible_version data['ansible_check_mode'] = self.ansible_check_mode data['ansible_host'] = result._host.name data['ansible_playbook'] = self.ansible_playbook @@ -168,7 +165,7 @@ class CallbackModule(CallbackBase): def _seconds_since_start(self, result): return ( - datetime.utcnow() - + now() - self.start_datetimes[result._task._uuid] ).total_seconds() @@ -186,10 +183,10 @@ class CallbackModule(CallbackBase): self.loganalytics.ansible_playbook = 
basename(playbook._file_name) def v2_playbook_on_task_start(self, task, is_conditional): - self.start_datetimes[task._uuid] = datetime.utcnow() + self.start_datetimes[task._uuid] = now() def v2_playbook_on_handler_task_start(self, task): - self.start_datetimes[task._uuid] = datetime.utcnow() + self.start_datetimes[task._uuid] = now() def v2_runner_on_ok(self, result, **kwargs): self.loganalytics.send_event( diff --git a/plugins/callback/logdna.py b/plugins/callback/logdna.py index fc9a81ac8a..09d8b38dcb 100644 --- a/plugins/callback/logdna.py +++ b/plugins/callback/logdna.py @@ -1,61 +1,59 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Samir Musali # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: logdna - type: notification - short_description: Sends playbook logs to LogDNA - description: - - This callback will report logs from playbook actions, tasks, and events to LogDNA (U(https://app.logdna.com)). - requirements: - - LogDNA Python Library (U(https://github.com/logdna/python)) - - whitelisting in configuration - options: - conf_key: - required: true - description: LogDNA Ingestion Key. - type: string - env: - - name: LOGDNA_INGESTION_KEY - ini: - - section: callback_logdna - key: conf_key - plugin_ignore_errors: - required: false - description: Whether to ignore errors on failing or not. - type: boolean - env: - - name: ANSIBLE_IGNORE_ERRORS - ini: - - section: callback_logdna - key: plugin_ignore_errors - default: false - conf_hostname: - required: false - description: Alternative Host Name; the current host name by default. 
- type: string - env: - - name: LOGDNA_HOSTNAME - ini: - - section: callback_logdna - key: conf_hostname - conf_tags: - required: false - description: Tags. - type: string - env: - - name: LOGDNA_TAGS - ini: - - section: callback_logdna - key: conf_tags - default: ansible -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: logdna +type: notification +short_description: Sends playbook logs to LogDNA +description: + - This callback reports logs from playbook actions, tasks, and events to LogDNA (U(https://app.logdna.com)). +requirements: + - LogDNA Python Library (U(https://github.com/logdna/python)) + - whitelisting in configuration +options: + conf_key: + required: true + description: LogDNA Ingestion Key. + type: string + env: + - name: LOGDNA_INGESTION_KEY + ini: + - section: callback_logdna + key: conf_key + plugin_ignore_errors: + required: false + description: Whether to ignore errors on failing or not. + type: boolean + env: + - name: ANSIBLE_IGNORE_ERRORS + ini: + - section: callback_logdna + key: plugin_ignore_errors + default: false + conf_hostname: + required: false + description: Alternative Host Name; the current host name by default. + type: string + env: + - name: LOGDNA_HOSTNAME + ini: + - section: callback_logdna + key: conf_hostname + conf_tags: + required: false + description: Tags. 
+ type: string + env: + - name: LOGDNA_TAGS + ini: + - section: callback_logdna + key: conf_tags + default: ansible +""" import logging import json @@ -73,7 +71,7 @@ except ImportError: # Getting MAC Address of system: def get_mac(): - mac = "%012x" % getnode() + mac = f"{getnode():012x}" return ":".join(map(lambda index: mac[index:index + 2], range(int(len(mac) / 2)))) @@ -161,7 +159,7 @@ class CallbackModule(CallbackBase): if ninvalidKeys > 0: for key in invalidKeys: del meta[key] - meta['__errors'] = 'These keys have been sanitized: ' + ', '.join(invalidKeys) + meta['__errors'] = f"These keys have been sanitized: {', '.join(invalidKeys)}" return meta def sanitizeJSON(self, data): diff --git a/plugins/callback/logentries.py b/plugins/callback/logentries.py index 22322a4df2..8fbcef4dd6 100644 --- a/plugins/callback/logentries.py +++ b/plugins/callback/logentries.py @@ -1,80 +1,80 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2015, Logentries.com, Jimmy Tang # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: logentries - type: notification - short_description: Sends events to Logentries +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: logentries +type: notification +short_description: Sends events to Logentries +description: + - This callback plugin generates JSON objects and send them to Logentries using TCP for auditing/debugging purposes. +requirements: + - whitelisting in configuration + - certifi (Python library) + - flatdict (Python library), if you want to use the O(flatten) option +options: + api: + description: URI to the Logentries API. 
+ type: str + env: + - name: LOGENTRIES_API + default: data.logentries.com + ini: + - section: callback_logentries + key: api + port: + description: HTTP port to use when connecting to the API. + type: int + env: + - name: LOGENTRIES_PORT + default: 80 + ini: + - section: callback_logentries + key: port + tls_port: + description: Port to use when connecting to the API when TLS is enabled. + type: int + env: + - name: LOGENTRIES_TLS_PORT + default: 443 + ini: + - section: callback_logentries + key: tls_port + token: + description: The logentries C(TCP token). + type: str + env: + - name: LOGENTRIES_ANSIBLE_TOKEN + required: true + ini: + - section: callback_logentries + key: token + use_tls: description: - - This callback plugin will generate JSON objects and send them to Logentries via TCP for auditing/debugging purposes. - - Before 2.4, if you wanted to use an ini configuration, the file must be placed in the same directory as this plugin and named C(logentries.ini). - - In 2.4 and above you can just put it in the main Ansible configuration file. - requirements: - - whitelisting in configuration - - certifi (Python library) - - flatdict (Python library), if you want to use the 'flatten' option - options: - api: - description: URI to the Logentries API. - env: - - name: LOGENTRIES_API - default: data.logentries.com - ini: - - section: callback_logentries - key: api - port: - description: HTTP port to use when connecting to the API. - env: - - name: LOGENTRIES_PORT - default: 80 - ini: - - section: callback_logentries - key: port - tls_port: - description: Port to use when connecting to the API when TLS is enabled. - env: - - name: LOGENTRIES_TLS_PORT - default: 443 - ini: - - section: callback_logentries - key: tls_port - token: - description: The logentries C(TCP token). 
- env: - - name: LOGENTRIES_ANSIBLE_TOKEN - required: true - ini: - - section: callback_logentries - key: token - use_tls: - description: - - Toggle to decide whether to use TLS to encrypt the communications with the API server. - env: - - name: LOGENTRIES_USE_TLS - default: false - type: boolean - ini: - - section: callback_logentries - key: use_tls - flatten: - description: Flatten complex data structures into a single dictionary with complex keys. - type: boolean - default: false - env: - - name: LOGENTRIES_FLATTEN - ini: - - section: callback_logentries - key: flatten -''' + - Toggle to decide whether to use TLS to encrypt the communications with the API server. + env: + - name: LOGENTRIES_USE_TLS + default: false + type: boolean + ini: + - section: callback_logentries + key: use_tls + flatten: + description: Flatten complex data structures into a single dictionary with complex keys. + type: boolean + default: false + env: + - name: LOGENTRIES_FLATTEN + ini: + - section: callback_logentries + key: flatten +""" -EXAMPLES = ''' -examples: > +EXAMPLES = r""" +examples: >- To enable, add this to your ansible.cfg file in the defaults block [defaults] @@ -90,10 +90,10 @@ examples: > api = data.logentries.com port = 10000 tls_port = 20000 - use_tls = no + use_tls = true token = dd21fc88-f00a-43ff-b977-e3a4233c53af - flatten = False -''' + flatten = false +""" import os import socket @@ -131,7 +131,7 @@ class PlainTextSocketAppender(object): # Error message displayed when an incorrect Token has been detected self.INVALID_TOKEN = "\n\nIt appears the LOGENTRIES_TOKEN parameter you entered is incorrect!\n\n" # Unicode Line separator character \u2028 - self.LINE_SEP = u'\u2028' + self.LINE_SEP = '\u2028' self._display = display self._conn = None @@ -149,7 +149,7 @@ class PlainTextSocketAppender(object): self.open_connection() return except Exception as e: - self._display.vvvv(u"Unable to connect to Logentries: %s" % to_text(e)) + self._display.vvvv(f"Unable to connect to 
Logentries: {e}") root_delay *= 2 if root_delay > self.MAX_DELAY: @@ -158,7 +158,7 @@ class PlainTextSocketAppender(object): wait_for = root_delay + random.uniform(0, root_delay) try: - self._display.vvvv("sleeping %s before retry" % wait_for) + self._display.vvvv(f"sleeping {wait_for} before retry") time.sleep(wait_for) except KeyboardInterrupt: raise @@ -171,8 +171,8 @@ class PlainTextSocketAppender(object): # Replace newlines with Unicode line separator # for multi-line events data = to_text(data, errors='surrogate_or_strict') - multiline = data.replace(u'\n', self.LINE_SEP) - multiline += u"\n" + multiline = data.replace('\n', self.LINE_SEP) + multiline += "\n" # Send data, reconnect if needed while True: try: @@ -196,15 +196,11 @@ else: class TLSSocketAppender(PlainTextSocketAppender): def open_connection(self): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sock = ssl.wrap_socket( + context = ssl.create_default_context( + purpose=ssl.Purpose.SERVER_AUTH, + cafile=certifi.where(), ) + sock = context.wrap_socket( sock=sock, - keyfile=None, - certfile=None, - server_side=False, - cert_reqs=ssl.CERT_REQUIRED, - ssl_version=getattr( - ssl, 'PROTOCOL_TLSv1_2', ssl.PROTOCOL_TLSv1), - ca_certs=certifi.where(), do_handshake_on_connect=True, suppress_ragged_eofs=True, ) sock.connect((self.LE_API, self.LE_TLS_PORT)) @@ -249,7 +245,7 @@ class CallbackModule(CallbackBase): self.use_tls = self.get_option('use_tls') self.flatten = self.get_option('flatten') except KeyError as e: - self._display.warning(u"Missing option for Logentries callback plugin: %s" % to_text(e)) + self._display.warning(f"Missing option for Logentries callback plugin: {e}") self.disabled = True try: @@ -268,10 +264,10 @@ class CallbackModule(CallbackBase): if not self.disabled: if self.use_tls: - self._display.vvvv("Connecting to %s:%s with TLS" % (self.api_url, self.api_tls_port)) + self._display.vvvv(f"Connecting to {self.api_url}:{self.api_tls_port} with TLS") self._appender = 
TLSSocketAppender(display=self._display, LE_API=self.api_url, LE_TLS_PORT=self.api_tls_port) else: - self._display.vvvv("Connecting to %s:%s" % (self.api_url, self.api_port)) + self._display.vvvv(f"Connecting to {self.api_url}:{self.api_port}") self._appender = PlainTextSocketAppender(display=self._display, LE_API=self.api_url, LE_PORT=self.api_port) self._appender.reopen_connection() @@ -284,7 +280,7 @@ class CallbackModule(CallbackBase): def emit(self, record): msg = record.rstrip('\n') - msg = "{0} {1}".format(self.token, msg) + msg = f"{self.token} {msg}" self._appender.put(msg) self._display.vvvv("Sent event to logentries") diff --git a/plugins/callback/logstash.py b/plugins/callback/logstash.py index 144e1f9915..f2279929f0 100644 --- a/plugins/callback/logstash.py +++ b/plugins/callback/logstash.py @@ -1,97 +1,98 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2020, Yevhen Khmelenko # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' - author: Yevhen Khmelenko (@ujenmr) - name: logstash - type: notification - short_description: Sends events to Logstash - description: - - This callback will report facts and task events to Logstash U(https://www.elastic.co/products/logstash). - requirements: - - whitelisting in configuration - - logstash (Python library) - options: - server: - description: Address of the Logstash server. - env: - - name: LOGSTASH_SERVER - ini: - - section: callback_logstash - key: server - version_added: 1.0.0 - default: localhost - port: - description: Port on which logstash is listening. - env: - - name: LOGSTASH_PORT - ini: - - section: callback_logstash - key: port - version_added: 1.0.0 - default: 5000 - type: - description: Message type. 
- env: - - name: LOGSTASH_TYPE - ini: - - section: callback_logstash - key: type - version_added: 1.0.0 - default: ansible - pre_command: - description: Executes command before run and its result is added to the C(ansible_pre_command_output) logstash field. - version_added: 2.0.0 - ini: - - section: callback_logstash - key: pre_command - env: - - name: LOGSTASH_PRE_COMMAND - format_version: - description: Logging format. - type: str - version_added: 2.0.0 - ini: - - section: callback_logstash - key: format_version - env: - - name: LOGSTASH_FORMAT_VERSION - default: v1 - choices: - - v1 - - v2 +DOCUMENTATION = r""" +author: Yevhen Khmelenko (@ujenmr) +name: logstash +type: notification +short_description: Sends events to Logstash +description: + - This callback reports facts and task events to Logstash U(https://www.elastic.co/products/logstash). +requirements: + - whitelisting in configuration + - logstash (Python library) +options: + server: + description: Address of the Logstash server. + type: str + env: + - name: LOGSTASH_SERVER + ini: + - section: callback_logstash + key: server + version_added: 1.0.0 + default: localhost + port: + description: Port on which logstash is listening. + type: int + env: + - name: LOGSTASH_PORT + ini: + - section: callback_logstash + key: port + version_added: 1.0.0 + default: 5000 + type: + description: Message type. + type: str + env: + - name: LOGSTASH_TYPE + ini: + - section: callback_logstash + key: type + version_added: 1.0.0 + default: ansible + pre_command: + description: Executes command before run and its result is added to the C(ansible_pre_command_output) logstash field. + type: str + version_added: 2.0.0 + ini: + - section: callback_logstash + key: pre_command + env: + - name: LOGSTASH_PRE_COMMAND + format_version: + description: Logging format. 
+ type: str + version_added: 2.0.0 + ini: + - section: callback_logstash + key: format_version + env: + - name: LOGSTASH_FORMAT_VERSION + default: v1 + choices: + - v1 + - v2 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" ansible.cfg: | - # Enable Callback plugin - [defaults] - callback_whitelist = community.general.logstash + # Enable Callback plugin + [defaults] + callback_whitelist = community.general.logstash - [callback_logstash] - server = logstash.example.com - port = 5000 - pre_command = git rev-parse HEAD - type = ansible + [callback_logstash] + server = logstash.example.com + port = 5000 + pre_command = git rev-parse HEAD + type = ansible -11-input-tcp.conf: | - # Enable Logstash TCP Input - input { - tcp { - port => 5000 - codec => json - add_field => { "[@metadata][beat]" => "notify" } - add_field => { "[@metadata][type]" => "ansible" } - } - } -''' +11-input-tcp.conf: |- + # Enable Logstash TCP Input + input { + tcp { + port => 5000 + codec => json + add_field => { "[@metadata][beat]" => "notify" } + add_field => { "[@metadata][type]" => "ansible" } + } + } +""" import os import json @@ -99,7 +100,6 @@ from ansible import context import socket import uuid import logging -from datetime import datetime try: import logstash @@ -109,6 +109,10 @@ except ImportError: from ansible.plugins.callback import CallbackBase +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + class CallbackModule(CallbackBase): @@ -122,11 +126,9 @@ class CallbackModule(CallbackBase): if not HAS_LOGSTASH: self.disabled = True - self._display.warning("The required python-logstash/python3-logstash is not installed. 
" - "pip install python-logstash for Python 2" - "pip install python3-logstash for Python 3") + self._display.warning("The required python3-logstash is not installed.") - self.start_time = datetime.utcnow() + self.start_time = now() def _init_plugin(self): if not self.disabled: @@ -177,7 +179,7 @@ class CallbackModule(CallbackBase): data['status'] = "OK" data['ansible_playbook'] = playbook._file_name - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info( "START PLAYBOOK | %s", data['ansible_playbook'], extra=data ) @@ -185,7 +187,7 @@ class CallbackModule(CallbackBase): self.logger.info("ansible start", extra=data) def v2_playbook_on_stats(self, stats): - end_time = datetime.utcnow() + end_time = now() runtime = end_time - self.start_time summarize_stat = {} for host in stats.processed.keys(): @@ -202,7 +204,7 @@ class CallbackModule(CallbackBase): data['ansible_playbook_duration'] = runtime.total_seconds() data['ansible_result'] = json.dumps(summarize_stat) # deprecated field - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info( "FINISH PLAYBOOK | %s", json.dumps(summarize_stat), extra=data ) @@ -221,7 +223,7 @@ class CallbackModule(CallbackBase): data['ansible_play_id'] = self.play_id data['ansible_play_name'] = self.play_name - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info("START PLAY | %s", self.play_name, extra=data) else: self.logger.info("ansible play", extra=data) @@ -246,7 +248,7 @@ class CallbackModule(CallbackBase): data['ansible_task'] = task_name data['ansible_facts'] = self._dump_results(result._result) - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info( "SETUP FACTS | %s", self._dump_results(result._result), extra=data ) @@ -267,7 +269,7 @@ class CallbackModule(CallbackBase): data['ansible_task_id'] = self.task_id data['ansible_result'] = self._dump_results(result._result) - if 
(self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info( "TASK OK | %s | RESULT | %s", task_name, self._dump_results(result._result), extra=data @@ -288,7 +290,7 @@ class CallbackModule(CallbackBase): data['ansible_task_id'] = self.task_id data['ansible_result'] = self._dump_results(result._result) - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info("TASK SKIPPED | %s", task_name, extra=data) else: self.logger.info("ansible skipped", extra=data) @@ -302,7 +304,7 @@ class CallbackModule(CallbackBase): data['ansible_play_name'] = self.play_name data['imported_file'] = imported_file - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info("IMPORT | %s", imported_file, extra=data) else: self.logger.info("ansible import", extra=data) @@ -316,7 +318,7 @@ class CallbackModule(CallbackBase): data['ansible_play_name'] = self.play_name data['imported_file'] = missing_file - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.info("NOT IMPORTED | %s", missing_file, extra=data) else: self.logger.info("ansible import", extra=data) @@ -340,7 +342,7 @@ class CallbackModule(CallbackBase): data['ansible_result'] = self._dump_results(result._result) self.errors += 1 - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.error( "TASK FAILED | %s | HOST | %s | RESULT | %s", task_name, self.hostname, @@ -363,7 +365,7 @@ class CallbackModule(CallbackBase): data['ansible_result'] = self._dump_results(result._result) self.errors += 1 - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": self.logger.error( "UNREACHABLE | %s | HOST | %s | RESULT | %s", task_name, self.hostname, @@ -386,7 +388,7 @@ class CallbackModule(CallbackBase): data['ansible_result'] = self._dump_results(result._result) self.errors += 1 - if (self.ls_format_version == "v2"): + if self.ls_format_version == "v2": 
self.logger.error( "ASYNC FAILED | %s | HOST | %s | RESULT | %s", task_name, self.hostname, diff --git a/plugins/callback/mail.py b/plugins/callback/mail.py index a605d13eac..7afb08e3f0 100644 --- a/plugins/callback/mail.py +++ b/plugins/callback/mail.py @@ -1,77 +1,84 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2012, Dag Wieers # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" name: mail type: notification -short_description: Sends failure events via email +short_description: Sends failure events through email description: -- This callback will report failures via email. + - This callback reports failures through email. author: -- Dag Wieers (@dagwieers) + - Dag Wieers (@dagwieers) requirements: -- whitelisting in configuration + - whitelisting in configuration options: mta: description: - - Mail Transfer Agent, server that accepts SMTP. + - Mail Transfer Agent, server that accepts SMTP. type: str env: - - name: SMTPHOST + - name: SMTPHOST ini: - - section: callback_mail - key: smtphost + - section: callback_mail + key: smtphost default: localhost mtaport: description: - - Mail Transfer Agent Port. - - Port at which server SMTP. + - Mail Transfer Agent Port. + - Port at which server SMTP. type: int ini: - - section: callback_mail - key: smtpport + - section: callback_mail + key: smtpport default: 25 to: description: - - Mail recipient. + - Mail recipient. type: list elements: str ini: - - section: callback_mail - key: to + - section: callback_mail + key: to default: [root] sender: description: - - Mail sender. - - This is required since community.general 6.0.0. + - Mail sender. + - This is required since community.general 6.0.0. 
type: str required: true ini: - - section: callback_mail - key: sender + - section: callback_mail + key: sender cc: description: - - CC'd recipients. + - CC'd recipients. type: list elements: str ini: - - section: callback_mail - key: cc + - section: callback_mail + key: cc bcc: description: - - BCC'd recipients. + - BCC'd recipients. type: list elements: str ini: - - section: callback_mail - key: bcc -''' + - section: callback_mail + key: bcc + message_id_domain: + description: + - The domain name to use for the L(Message-ID header, https://en.wikipedia.org/wiki/Message-ID). + - The default is the hostname of the control node. + type: str + ini: + - section: callback_mail + key: message_id_domain + version_added: 8.2.0 +""" import json import os @@ -79,7 +86,6 @@ import re import email.utils import smtplib -from ansible.module_utils.six import string_types from ansible.module_utils.common.text.converters import to_bytes from ansible.parsing.ajson import AnsibleJSONEncoder from ansible.plugins.callback import CallbackBase @@ -126,14 +132,14 @@ class CallbackModule(CallbackBase): if self.bcc: bcc_addresses = email.utils.getaddresses(self.bcc) - content = 'Date: %s\n' % email.utils.formatdate() - content += 'From: %s\n' % email.utils.formataddr(sender_address) + content = f'Date: {email.utils.formatdate()}\n' + content += f'From: {email.utils.formataddr(sender_address)}\n' if self.to: - content += 'To: %s\n' % ', '.join([email.utils.formataddr(pair) for pair in to_addresses]) + content += f"To: {', '.join([email.utils.formataddr(pair) for pair in to_addresses])}\n" if self.cc: - content += 'Cc: %s\n' % ', '.join([email.utils.formataddr(pair) for pair in cc_addresses]) - content += 'Message-ID: %s\n' % email.utils.make_msgid() - content += 'Subject: %s\n\n' % subject.strip() + content += f"Cc: {', '.join([email.utils.formataddr(pair) for pair in cc_addresses])}\n" + content += f"Message-ID: {email.utils.make_msgid(domain=self.get_option('message_id_domain'))}\n" + 
content += f'Subject: {subject.strip()}\n\n' content += body addresses = to_addresses @@ -150,23 +156,22 @@ class CallbackModule(CallbackBase): smtp.quit() def subject_msg(self, multiline, failtype, linenr): - return '%s: %s' % (failtype, multiline.strip('\r\n').splitlines()[linenr]) + msg = multiline.strip('\r\n').splitlines()[linenr] + return f'{failtype}: {msg}' def indent(self, multiline, indent=8): return re.sub('^', ' ' * indent, multiline, flags=re.MULTILINE) def body_blob(self, multiline, texttype): ''' Turn some text output in a well-indented block for sending in a mail body ''' - intro = 'with the following %s:\n\n' % texttype - blob = '' - for line in multiline.strip('\r\n').splitlines(): - blob += '%s\n' % line - return intro + self.indent(blob) + '\n' + intro = f'with the following {texttype}:\n\n' + blob = "\n".join(multiline.strip('\r\n').splitlines()) + return f"{intro}{self.indent(blob)}\n" def mail_result(self, result, failtype): host = result._host.get_name() if not self.sender: - self.sender = '"Ansible: %s" ' % host + self.sender = f'"Ansible: {host}" ' # Add subject if self.itembody: @@ -182,31 +187,33 @@ class CallbackModule(CallbackBase): elif result._result.get('exception'): # Unrelated exceptions are added to output :-/ subject = self.subject_msg(result._result['exception'], failtype, -1) else: - subject = '%s: %s' % (failtype, result._task.name or result._task.action) + subject = f'{failtype}: {result._task.name or result._task.action}' # Make playbook name visible (e.g. 
in Outlook/Gmail condensed view) - body = 'Playbook: %s\n' % os.path.basename(self.playbook._file_name) + body = f'Playbook: {os.path.basename(self.playbook._file_name)}\n' if result._task.name: - body += 'Task: %s\n' % result._task.name - body += 'Module: %s\n' % result._task.action - body += 'Host: %s\n' % host + body += f'Task: {result._task.name}\n' + body += f'Module: {result._task.action}\n' + body += f'Host: {host}\n' body += '\n' # Add task information (as much as possible) body += 'The following task failed:\n\n' if 'invocation' in result._result: - body += self.indent('%s: %s\n' % (result._task.action, json.dumps(result._result['invocation']['module_args'], indent=4))) + body += self.indent(f"{result._task.action}: {json.dumps(result._result['invocation']['module_args'], indent=4)}\n") elif result._task.name: - body += self.indent('%s (%s)\n' % (result._task.name, result._task.action)) + body += self.indent(f'{result._task.name} ({result._task.action})\n') else: - body += self.indent('%s\n' % result._task.action) + body += self.indent(f'{result._task.action}\n') body += '\n' # Add item / message if self.itembody: body += self.itembody elif result._result.get('failed_when_result') is True: - body += "due to the following condition:\n\n" + self.indent('failed_when:\n- ' + '\n- '.join(result._task.failed_when)) + '\n\n' + fail_cond_list = '\n- '.join(result._task.failed_when) + fail_cond = self.indent(f"failed_when:\n- {fail_cond_list}") + body += f"due to the following condition:\n\n{fail_cond}\n\n" elif result._result.get('msg'): body += self.body_blob(result._result['msg'], 'message') @@ -219,13 +226,13 @@ class CallbackModule(CallbackBase): body += self.body_blob(result._result['exception'], 'exception') if result._result.get('warnings'): for i in range(len(result._result.get('warnings'))): - body += self.body_blob(result._result['warnings'][i], 'exception %d' % (i + 1)) + body += self.body_blob(result._result['warnings'][i], f'exception {i + 1}') if 
result._result.get('deprecations'): for i in range(len(result._result.get('deprecations'))): - body += self.body_blob(result._result['deprecations'][i], 'exception %d' % (i + 1)) + body += self.body_blob(result._result['deprecations'][i], f'exception {i + 1}') body += 'and a complete dump of the error:\n\n' - body += self.indent('%s: %s' % (failtype, json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4))) + body += self.indent(f'{failtype}: {json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4)}') self.mail(subject=subject, body=body) @@ -248,4 +255,4 @@ class CallbackModule(CallbackBase): def v2_runner_item_on_failed(self, result): # Pass item information to task failure self.itemsubject = result._result['msg'] - self.itembody += self.body_blob(json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4), "failed item dump '%(item)s'" % result._result) + self.itembody += self.body_blob(json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4), f"failed item dump '{result._result['item']}'") diff --git a/plugins/callback/nrdp.py b/plugins/callback/nrdp.py index 8295bf9759..6f1b5e2f5b 100644 --- a/plugins/callback/nrdp.py +++ b/plugins/callback/nrdp.py @@ -1,76 +1,72 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018 Remi Verchere # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: nrdp - type: notification - author: "Remi VERCHERE (@rverchere)" - short_description: Post task results to a Nagios server through nrdp - description: - - This callback send playbook result to Nagios. - - Nagios shall use NRDP to recive passive events. - - The passive check is sent to a dedicated host/service for Ansible. - options: - url: - description: URL of the nrdp server. 
- required: true - env: - - name : NRDP_URL - ini: - - section: callback_nrdp - key: url - type: string - validate_certs: - description: Validate the SSL certificate of the nrdp server. (Used for HTTPS URLs.) - env: - - name: NRDP_VALIDATE_CERTS - ini: - - section: callback_nrdp - key: validate_nrdp_certs - - section: callback_nrdp - key: validate_certs - type: boolean - default: false - aliases: [ validate_nrdp_certs ] - token: - description: Token to be allowed to push nrdp events. - required: true - env: - - name: NRDP_TOKEN - ini: - - section: callback_nrdp - key: token - type: string - hostname: - description: Hostname where the passive check is linked to. - required: true - env: - - name : NRDP_HOSTNAME - ini: - - section: callback_nrdp - key: hostname - type: string - servicename: - description: Service where the passive check is linked to. - required: true - env: - - name : NRDP_SERVICENAME - ini: - - section: callback_nrdp - key: servicename - type: string -''' +DOCUMENTATION = r""" +name: nrdp +type: notification +author: "Remi VERCHERE (@rverchere)" +short_description: Post task results to a Nagios server through nrdp +description: + - This callback send playbook result to Nagios. + - Nagios shall use NRDP to receive passive events. + - The passive check is sent to a dedicated host/service for Ansible. +options: + url: + description: URL of the nrdp server. + required: true + env: + - name: NRDP_URL + ini: + - section: callback_nrdp + key: url + type: string + validate_certs: + description: Validate the SSL certificate of the nrdp server. (Used for HTTPS URLs). + env: + - name: NRDP_VALIDATE_CERTS + ini: + - section: callback_nrdp + key: validate_nrdp_certs + - section: callback_nrdp + key: validate_certs + type: boolean + default: false + aliases: [validate_nrdp_certs] + token: + description: Token to be allowed to push nrdp events. 
+ required: true + env: + - name: NRDP_TOKEN + ini: + - section: callback_nrdp + key: token + type: string + hostname: + description: Hostname where the passive check is linked to. + required: true + env: + - name: NRDP_HOSTNAME + ini: + - section: callback_nrdp + key: hostname + type: string + servicename: + description: Service where the passive check is linked to. + required: true + env: + - name: NRDP_SERVICENAME + ini: + - section: callback_nrdp + key: servicename + type: string +""" -import os -import json +from urllib.parse import urlencode -from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.common.text.converters import to_bytes from ansible.module_utils.urls import open_url from ansible.plugins.callback import CallbackBase @@ -135,10 +131,10 @@ class CallbackModule(CallbackBase): xmldata = "\n" xmldata += "\n" xmldata += "\n" - xmldata += "%s\n" % self.hostname - xmldata += "%s\n" % self.servicename - xmldata += "%d\n" % state - xmldata += "%s\n" % msg + xmldata += f"{self.hostname}\n" + xmldata += f"{self.servicename}\n" + xmldata += f"{state}\n" + xmldata += f"{msg}\n" xmldata += "\n" xmldata += "\n" @@ -155,7 +151,7 @@ class CallbackModule(CallbackBase): validate_certs=self.validate_nrdp_certs) return response.read() except Exception as ex: - self._display.warning("NRDP callback cannot send result {0}".format(ex)) + self._display.warning(f"NRDP callback cannot send result {ex}") def v2_playbook_on_play_start(self, play): ''' @@ -173,17 +169,16 @@ class CallbackModule(CallbackBase): critical = warning = 0 for host in hosts: stat = stats.summarize(host) - gstats += "'%s_ok'=%d '%s_changed'=%d \ - '%s_unreachable'=%d '%s_failed'=%d " % \ - (host, stat['ok'], host, stat['changed'], - host, stat['unreachable'], host, stat['failures']) + gstats += ( + f"'{host}_ok'={stat['ok']} '{host}_changed'={stat['changed']} '{host}_unreachable'={stat['unreachable']} '{host}_failed'={stat['failures']} " + ) # Critical when failed 
tasks or unreachable host critical += stat['failures'] critical += stat['unreachable'] # Warning when changed tasks warning += stat['changed'] - msg = "%s | %s" % (name, gstats) + msg = f"{name} | {gstats}" if critical: # Send Critical self._send_nrdp(self.CRITICAL, msg) diff --git a/plugins/callback/null.py b/plugins/callback/null.py index f53a242945..3074a698d0 100644 --- a/plugins/callback/null.py +++ b/plugins/callback/null.py @@ -1,22 +1,20 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: 'null' - type: stdout - requirements: - - set as main display callback - short_description: Don't display stuff to screen - description: - - This callback prevents outputing events to screen. -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: 'null' +type: stdout +requirements: + - set as main display callback +short_description: Do not display stuff to screen +description: + - This callback prevents outputting events to screen. +""" from ansible.plugins.callback import CallbackBase @@ -24,7 +22,7 @@ from ansible.plugins.callback import CallbackBase class CallbackModule(CallbackBase): ''' - This callback wont print messages to stdout when new callback events are received. + This callback won't print messages to stdout when new callback events are received. 
''' CALLBACK_VERSION = 2.0 diff --git a/plugins/callback/opentelemetry.py b/plugins/callback/opentelemetry.py index e00e1d71ad..ca6ec2b916 100644 --- a/plugins/callback/opentelemetry.py +++ b/plugins/callback/opentelemetry.py @@ -1,87 +1,123 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2021, Victor Martinez # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Victor Martinez (@v1v) - name: opentelemetry - type: notification - short_description: Create distributed traces with OpenTelemetry - version_added: 3.7.0 +DOCUMENTATION = r""" +author: Victor Martinez (@v1v) +name: opentelemetry +type: notification +short_description: Create distributed traces with OpenTelemetry +version_added: 3.7.0 +description: + - This callback creates distributed traces for each Ansible task with OpenTelemetry. + - You can configure the OpenTelemetry exporter and SDK with environment variables. + - See U(https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html). + - See + U(https://opentelemetry-python.readthedocs.io/en/latest/sdk/environment_variables.html#opentelemetry-sdk-environment-variables). +options: + hide_task_arguments: + default: false + type: bool description: - - This callback creates distributed traces for each Ansible task with OpenTelemetry. - - You can configure the OpenTelemetry exporter and SDK with environment variables. - - See U(https://opentelemetry-python.readthedocs.io/en/latest/exporter/otlp/otlp.html). - - See U(https://opentelemetry-python.readthedocs.io/en/latest/sdk/environment_variables.html#opentelemetry-sdk-environment-variables). - options: - hide_task_arguments: - default: false - type: bool - description: - - Hide the arguments for a task. 
- env: - - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS - ini: - - section: callback_opentelemetry - key: hide_task_arguments - version_added: 5.3.0 - enable_from_environment: - type: str - description: - - Whether to enable this callback only if the given environment variable exists and it is set to C(true). - - This is handy when you use Configuration as Code and want to send distributed traces - if running in the CI rather when running Ansible locally. - - For such, it evaluates the given I(enable_from_environment) value as environment variable - and if set to true this plugin will be enabled. - env: - - name: ANSIBLE_OPENTELEMETRY_ENABLE_FROM_ENVIRONMENT - ini: - - section: callback_opentelemetry - key: enable_from_environment - version_added: 5.3.0 - version_added: 3.8.0 - otel_service_name: - default: ansible - type: str - description: - - The service name resource attribute. - env: - - name: OTEL_SERVICE_NAME - ini: - - section: callback_opentelemetry - key: otel_service_name - version_added: 5.3.0 - traceparent: - default: None - type: str - description: - - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header). - env: - - name: TRACEPARENT - disable_logs: - default: false - type: bool - description: - - Disable sending logs. - env: - - name: ANSIBLE_OPENTELEMETRY_DISABLE_LOGS - ini: - - section: callback_opentelemetry - key: disable_logs - version_added: 5.8.0 - requirements: - - opentelemetry-api (Python library) - - opentelemetry-exporter-otlp (Python library) - - opentelemetry-sdk (Python library) -''' + - Hide the arguments for a task. + env: + - name: ANSIBLE_OPENTELEMETRY_HIDE_TASK_ARGUMENTS + ini: + - section: callback_opentelemetry + key: hide_task_arguments + version_added: 5.3.0 + enable_from_environment: + type: str + description: + - Whether to enable this callback only if the given environment variable exists and it is set to V(true). 
+ - This is handy when you use Configuration as Code and want to send distributed traces if running in the CI rather when + running Ansible locally. + - For such, it evaluates the given O(enable_from_environment) value as environment variable and if set to V(true) this + plugin is enabled. + env: + - name: ANSIBLE_OPENTELEMETRY_ENABLE_FROM_ENVIRONMENT + ini: + - section: callback_opentelemetry + key: enable_from_environment + version_added: 5.3.0 + version_added: 3.8.0 + otel_service_name: + default: ansible + type: str + description: + - The service name resource attribute. + env: + - name: OTEL_SERVICE_NAME + ini: + - section: callback_opentelemetry + key: otel_service_name + version_added: 5.3.0 + traceparent: + default: None + type: str + description: + - The L(W3C Trace Context header traceparent,https://www.w3.org/TR/trace-context-1/#traceparent-header). + env: + - name: TRACEPARENT + disable_logs: + default: false + type: bool + description: + - Disable sending logs. + env: + - name: ANSIBLE_OPENTELEMETRY_DISABLE_LOGS + ini: + - section: callback_opentelemetry + key: disable_logs + version_added: 5.8.0 + disable_attributes_in_logs: + default: false + type: bool + description: + - Disable populating span attributes to the logs. + env: + - name: ANSIBLE_OPENTELEMETRY_DISABLE_ATTRIBUTES_IN_LOGS + ini: + - section: callback_opentelemetry + key: disable_attributes_in_logs + version_added: 7.1.0 + store_spans_in_file: + type: str + description: + - It stores the exported spans in the given file. + env: + - name: ANSIBLE_OPENTELEMETRY_STORE_SPANS_IN_FILE + ini: + - section: callback_opentelemetry + key: store_spans_in_file + version_added: 9.0.0 + otel_exporter_otlp_traces_protocol: + type: str + description: + - E(OTEL_EXPORTER_OTLP_TRACES_PROTOCOL) represents the transport protocol for spans. + - See + U(https://opentelemetry-python.readthedocs.io/en/latest/sdk/environment_variables.html#envvar-OTEL_EXPORTER_OTLP_TRACES_PROTOCOL). 
+ default: grpc + choices: + - grpc + - http/protobuf + env: + - name: OTEL_EXPORTER_OTLP_TRACES_PROTOCOL + ini: + - section: callback_opentelemetry + key: otel_exporter_otlp_traces_protocol + version_added: 9.0.0 +requirements: + - opentelemetry-api (Python library) + - opentelemetry-exporter-otlp (Python library) + - opentelemetry-sdk (Python library) +""" -EXAMPLES = ''' -examples: | +EXAMPLES = r""" +examples: |- Enable the plugin in ansible.cfg: [defaults] callbacks_enabled = community.general.opentelemetry @@ -93,60 +129,44 @@ examples: | export OTEL_EXPORTER_OTLP_HEADERS="authorization=Bearer your_otel_token" export OTEL_SERVICE_NAME=your_service_name export ANSIBLE_OPENTELEMETRY_ENABLED=true -''' +""" import getpass +import json import os import socket -import sys -import time import uuid - from collections import OrderedDict from os.path import basename +from time import time_ns +from urllib.parse import urlparse from ansible.errors import AnsibleError -from ansible.module_utils.six import raise_from -from ansible.module_utils.six.moves.urllib.parse import urlparse +from ansible.module_utils.ansible_release import __version__ as ansible_version from ansible.plugins.callback import CallbackBase try: from opentelemetry import trace from opentelemetry.trace import SpanKind - from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter + from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter as GRPCOTLPSpanExporter + from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter as HTTPOTLPSpanExporter from opentelemetry.sdk.resources import SERVICE_NAME, Resource from opentelemetry.trace.status import Status, StatusCode from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import ( - BatchSpanProcessor + BatchSpanProcessor, + SimpleSpanProcessor + ) + from 
opentelemetry.sdk.trace.export.in_memory_span_exporter import ( + InMemorySpanExporter ) - - # Support for opentelemetry-api <= 1.12 - try: - from opentelemetry.util._time import _time_ns - except ImportError as imp_exc: - OTEL_LIBRARY_TIME_NS_ERROR = imp_exc - else: - OTEL_LIBRARY_TIME_NS_ERROR = None - except ImportError as imp_exc: OTEL_LIBRARY_IMPORT_ERROR = imp_exc - OTEL_LIBRARY_TIME_NS_ERROR = imp_exc else: OTEL_LIBRARY_IMPORT_ERROR = None -if sys.version_info >= (3, 7): - time_ns = time.time_ns -elif not OTEL_LIBRARY_TIME_NS_ERROR: - time_ns = _time_ns -else: - def time_ns(): - # Support versions older than 3.7 with opentelemetry-api > 1.12 - return int(time.time() * 1e9) - - class TaskData: """ Data about an individual task. @@ -167,7 +187,7 @@ class TaskData: if host.uuid in self.host_data: if host.status == 'included': # concatenate task include output from multiple items - host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result) + host.result = f'{self.host_data[host.uuid].result}\n{host.result}' else: return @@ -190,7 +210,6 @@ class HostData: class OpenTelemetrySource(object): def __init__(self, display): self.ansible_playbook = "" - self.ansible_version = None self.session = str(uuid.uuid4()) self.host = socket.gethostname() try: @@ -238,13 +257,19 @@ class OpenTelemetrySource(object): task = tasks_data[task_uuid] - if self.ansible_version is None and hasattr(result, '_task_fields') and result._task_fields['args'].get('_ansible_version'): - self.ansible_version = result._task_fields['args'].get('_ansible_version') - task.dump = dump task.add_host(HostData(host_uuid, host_name, status, result)) - def generate_distributed_traces(self, otel_service_name, ansible_playbook, tasks_data, status, traceparent, disable_logs): + def generate_distributed_traces(self, + otel_service_name, + ansible_playbook, + tasks_data, + status, + traceparent, + disable_logs, + disable_attributes_in_logs, + otel_exporter_otlp_traces_protocol, + 
store_spans_in_file): """ generate distributed traces from the collected TaskData and HostData """ tasks = [] @@ -260,7 +285,16 @@ class OpenTelemetrySource(object): ) ) - processor = BatchSpanProcessor(OTLPSpanExporter()) + otel_exporter = None + if store_spans_in_file: + otel_exporter = InMemorySpanExporter() + processor = SimpleSpanProcessor(otel_exporter) + else: + if otel_exporter_otlp_traces_protocol == 'grpc': + otel_exporter = GRPCOTLPSpanExporter() + else: + otel_exporter = HTTPOTLPSpanExporter() + processor = BatchSpanProcessor(otel_exporter) trace.get_tracer_provider().add_span_processor(processor) @@ -270,8 +304,7 @@ class OpenTelemetrySource(object): start_time=parent_start_time, kind=SpanKind.SERVER) as parent: parent.set_status(status) # Populate trace metadata attributes - if self.ansible_version is not None: - parent.set_attribute("ansible.version", self.ansible_version) + parent.set_attribute("ansible.version", ansible_version) parent.set_attribute("ansible.session", self.session) parent.set_attribute("ansible.host.name", self.host) if self.ip_address is not None: @@ -280,12 +313,14 @@ class OpenTelemetrySource(object): for task in tasks: for host_uuid, host_data in task.host_data.items(): with tracer.start_as_current_span(task.name, start_time=task.start, end_on_exit=False) as span: - self.update_span_data(task, host_data, span, disable_logs) + self.update_span_data(task, host_data, span, disable_logs, disable_attributes_in_logs) - def update_span_data(self, task_data, host_data, span, disable_logs): + return otel_exporter + + def update_span_data(self, task_data, host_data, span, disable_logs, disable_attributes_in_logs): """ update the span with the given TaskData and HostData """ - name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name) + name = f'[{host_data.name}] {task_data.play}: {task_data.name}' message = 'success' res = {} @@ -293,6 +328,7 @@ class OpenTelemetrySource(object): status = Status(status_code=StatusCode.OK) 
if host_data.status != 'included': # Support loops + enriched_error_message = None if 'results' in host_data.result._result: if host_data.status == 'failed': message = self.get_error_message_from_results(host_data.result._result['results'], task_data.action) @@ -315,39 +351,48 @@ class OpenTelemetrySource(object): status = Status(status_code=StatusCode.UNSET) span.set_status(status) + + # Create the span and log attributes + attributes = { + "ansible.task.module": task_data.action, + "ansible.task.message": message, + "ansible.task.name": name, + "ansible.task.result": rc, + "ansible.task.host.name": host_data.name, + "ansible.task.host.status": host_data.status + } if isinstance(task_data.args, dict) and "gather_facts" not in task_data.action: names = tuple(self.transform_ansible_unicode_to_str(k) for k in task_data.args.keys()) values = tuple(self.transform_ansible_unicode_to_str(k) for k in task_data.args.values()) - self.set_span_attribute(span, ("ansible.task.args.name"), names) - self.set_span_attribute(span, ("ansible.task.args.value"), values) - self.set_span_attribute(span, "ansible.task.module", task_data.action) - self.set_span_attribute(span, "ansible.task.message", message) - self.set_span_attribute(span, "ansible.task.name", name) - self.set_span_attribute(span, "ansible.task.result", rc) - self.set_span_attribute(span, "ansible.task.host.name", host_data.name) - self.set_span_attribute(span, "ansible.task.host.status", host_data.status) + attributes[("ansible.task.args.name")] = names + attributes[("ansible.task.args.value")] = values + + self.set_span_attributes(span, attributes) + # This will allow to enrich the service map self.add_attributes_for_service_map_if_possible(span, task_data) # Send logs if not disable_logs: - span.add_event(task_data.dump) + # This will avoid populating span attributes to the logs + span.add_event(task_data.dump, attributes={} if disable_attributes_in_logs else attributes) + # Close span always 
span.end(end_time=host_data.finish) - def set_span_attribute(self, span, attributeName, attributeValue): - """ update the span attribute with the given attribute and value if not None """ + def set_span_attributes(self, span, attributes): + """ update the span attributes with the given attributes if not None """ if span is None and self._display is not None: self._display.warning('span object is None. Please double check if that is expected.') else: - if attributeValue is not None: - span.set_attribute(attributeName, attributeValue) + if attributes is not None: + span.set_attributes(attributes) def add_attributes_for_service_map_if_possible(self, span, task_data): """Update the span attributes with the service that the task interacted with, if possible.""" redacted_url = self.parse_and_redact_url_if_possible(task_data.args) if redacted_url: - self.set_span_attribute(span, "http.url", redacted_url.geturl()) + span.set_attribute("http.url", redacted_url.geturl()) @staticmethod def parse_and_redact_url_if_possible(args): @@ -398,7 +443,7 @@ class OpenTelemetrySource(object): def get_error_message_from_results(results, action): for result in results: if result.get('failed', False): - return ('{0}({1}) - {2}').format(action, result.get('item', 'none'), OpenTelemetrySource.get_error_message(result)) + return f"{action}({result.get('item', 'none')}) - {OpenTelemetrySource.get_error_message(result)}" @staticmethod def _last_line(text): @@ -410,14 +455,14 @@ class OpenTelemetrySource(object): message = result.get('msg', 'failed') exception = result.get('exception') stderr = result.get('stderr') - return ('message: "{0}"\nexception: "{1}"\nstderr: "{2}"').format(message, exception, stderr) + return f"message: \"{message}\"\nexception: \"{exception}\"\nstderr: \"{stderr}\"" @staticmethod def enrich_error_message_from_results(results, action): message = "" for result in results: if result.get('failed', False): - message = ('{0}({1}) - {2}\n{3}').format(action, 
result.get('item', 'none'), OpenTelemetrySource.enrich_error_message(result), message) + message = f"{action}({result.get('item', 'none')}) - {OpenTelemetrySource.enrich_error_message(result)}\n{message}" return message @@ -434,6 +479,7 @@ class CallbackModule(CallbackBase): def __init__(self, display=None): super(CallbackModule, self).__init__(display=display) self.hide_task_arguments = None + self.disable_attributes_in_logs = None self.disable_logs = None self.otel_service_name = None self.ansible_playbook = None @@ -442,11 +488,13 @@ class CallbackModule(CallbackBase): self.errors = 0 self.disabled = False self.traceparent = False + self.store_spans_in_file = False + self.otel_exporter_otlp_traces_protocol = None if OTEL_LIBRARY_IMPORT_ERROR: - raise_from( - AnsibleError('The `opentelemetry-api`, `opentelemetry-exporter-otlp` or `opentelemetry-sdk` must be installed to use this plugin'), - OTEL_LIBRARY_IMPORT_ERROR) + raise AnsibleError( + 'The `opentelemetry-api`, `opentelemetry-exporter-otlp` or `opentelemetry-sdk` must be installed to use this plugin' + ) from OTEL_LIBRARY_IMPORT_ERROR self.tasks_data = OrderedDict() @@ -460,13 +508,18 @@ class CallbackModule(CallbackBase): environment_variable = self.get_option('enable_from_environment') if environment_variable is not None and os.environ.get(environment_variable, 'false').lower() != 'true': self.disabled = True - self._display.warning("The `enable_from_environment` option has been set and {0} is not enabled. " - "Disabling the `opentelemetry` callback plugin.".format(environment_variable)) + self._display.warning( + f"The `enable_from_environment` option has been set and {environment_variable} is not enabled. Disabling the `opentelemetry` callback plugin." 
+ ) self.hide_task_arguments = self.get_option('hide_task_arguments') + self.disable_attributes_in_logs = self.get_option('disable_attributes_in_logs') + self.disable_logs = self.get_option('disable_logs') + self.store_spans_in_file = self.get_option('store_spans_in_file') + self.otel_service_name = self.get_option('otel_service_name') if not self.otel_service_name: @@ -475,6 +528,22 @@ class CallbackModule(CallbackBase): # See https://github.com/open-telemetry/opentelemetry-specification/issues/740 self.traceparent = self.get_option('traceparent') + self.otel_exporter_otlp_traces_protocol = self.get_option('otel_exporter_otlp_traces_protocol') + + def dump_results(self, task, result): + """ dump the results if disable_logs is not enabled """ + if self.disable_logs: + return "" + # ansible.builtin.uri contains the response in the json field + save = dict(result._result) + + if "json" in save and task.action in ("ansible.builtin.uri", "ansible.legacy.uri", "uri"): + save.pop("json") + # ansible.builtin.slurp contains the response in the content field + if "content" in save and task.action in ("ansible.builtin.slurp", "ansible.legacy.slurp", "slurp"): + save.pop("content") + return self._dump_results(save) + def v2_playbook_on_start(self, playbook): self.ansible_playbook = basename(playbook._file_name) @@ -524,7 +593,7 @@ class CallbackModule(CallbackBase): self.tasks_data, status, result, - self._dump_results(result._result) + self.dump_results(self.tasks_data[result._task._uuid], result) ) def v2_runner_on_ok(self, result): @@ -532,7 +601,7 @@ class CallbackModule(CallbackBase): self.tasks_data, 'ok', result, - self._dump_results(result._result) + self.dump_results(self.tasks_data[result._task._uuid], result) ) def v2_runner_on_skipped(self, result): @@ -540,7 +609,7 @@ class CallbackModule(CallbackBase): self.tasks_data, 'skipped', result, - self._dump_results(result._result) + self.dump_results(self.tasks_data[result._task._uuid], result) ) def 
v2_playbook_on_include(self, included_file): @@ -556,14 +625,22 @@ class CallbackModule(CallbackBase): status = Status(status_code=StatusCode.OK) else: status = Status(status_code=StatusCode.ERROR) - self.opentelemetry.generate_distributed_traces( + otel_exporter = self.opentelemetry.generate_distributed_traces( self.otel_service_name, self.ansible_playbook, self.tasks_data, status, self.traceparent, - self.disable_logs + self.disable_logs, + self.disable_attributes_in_logs, + self.otel_exporter_otlp_traces_protocol, + self.store_spans_in_file ) + if self.store_spans_in_file: + spans = [json.loads(span.to_json()) for span in otel_exporter.get_finished_spans()] + with open(self.store_spans_in_file, "w", encoding="utf-8") as output: + json.dump({"spans": spans}, output, indent=4) + def v2_runner_on_async_failed(self, result, **kwargs): self.errors += 1 diff --git a/plugins/callback/print_task.py b/plugins/callback/print_task.py new file mode 100644 index 0000000000..f6008c817f --- /dev/null +++ b/plugins/callback/print_task.py @@ -0,0 +1,62 @@ +# Copyright (c) 2025, Max Mitschke +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: print_task +type: aggregate +short_description: Prints playbook task snippet to job output +description: + - This plugin prints the currently executing playbook task to the job output. 
+version_added: 10.7.0 +requirements: + - enable in configuration +""" + +EXAMPLES = r""" +ansible.cfg: |- + # Enable plugin + [defaults] + callbacks_enabled=community.general.print_task +""" + +from yaml import load, dump + +try: + from yaml import CSafeDumper as SafeDumper + from yaml import CSafeLoader as SafeLoader +except ImportError: + from yaml import SafeDumper, SafeLoader + +from ansible.plugins.callback import CallbackBase + + +class CallbackModule(CallbackBase): + """ + This callback module tells you how long your plays ran for. + """ + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'aggregate' + CALLBACK_NAME = 'community.general.print_task' + + CALLBACK_NEEDS_ENABLED = True + + def __init__(self): + super(CallbackModule, self).__init__() + self._printed_message = False + + def _print_task(self, task): + if hasattr(task, '_ds'): + task_snippet = load(str([task._ds.copy()]), Loader=SafeLoader) + task_yaml = dump(task_snippet, sort_keys=False, Dumper=SafeDumper) + self._display.display(f"\n{task_yaml}\n") + self._printed_message = True + + def v2_playbook_on_task_start(self, task, is_conditional): + self._printed_message = False + + def v2_runner_on_start(self, host, task): + if not self._printed_message: + self._print_task(task) diff --git a/plugins/callback/say.py b/plugins/callback/say.py index 005725a22b..0455ee69e6 100644 --- a/plugins/callback/say.py +++ b/plugins/callback/say.py @@ -1,26 +1,22 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2012, Michael DeHaan, # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: say - type: notification - requirements: - - whitelisting in configuration - - the 
C(/usr/bin/say) command line program (standard on macOS) or C(espeak) command line program - short_description: notify using software speech synthesizer - description: - - This plugin will use the C(say) or C(espeak) program to "speak" about play events. - notes: - - In Ansible 2.8, this callback has been renamed from C(osx_say) into M(community.general.say). -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: say +type: notification +requirements: + - whitelisting in configuration + - the C(/usr/bin/say) command line program (standard on macOS) or C(espeak) command line program +short_description: Notify using software speech synthesizer +description: + - This plugin uses C(say) or C(espeak) to "speak" about play events. +""" import platform import subprocess @@ -52,7 +48,7 @@ class CallbackModule(CallbackBase): self.synthesizer = get_bin_path('say') if platform.system() != 'Darwin': # 'say' binary available, it might be GNUstep tool which doesn't support 'voice' parameter - self._display.warning("'say' executable found but system is '%s': ignoring voice parameter" % platform.system()) + self._display.warning(f"'say' executable found but system is '{platform.system()}': ignoring voice parameter") else: self.FAILED_VOICE = 'Zarvox' self.REGULAR_VOICE = 'Trinoids' @@ -71,7 +67,7 @@ class CallbackModule(CallbackBase): # ansible will not call any callback if disabled is set to True if not self.synthesizer: self.disabled = True - self._display.warning("Unable to find either 'say' or 'espeak' executable, plugin %s disabled" % os.path.basename(__file__)) + self._display.warning(f"Unable to find either 'say' or 'espeak' executable, plugin {os.path.basename(__file__)} disabled") def say(self, msg, voice): cmd = [self.synthesizer, msg] @@ -80,7 +76,7 @@ class CallbackModule(CallbackBase): subprocess.call(cmd) def runner_on_failed(self, host, res, ignore_errors=False): - self.say("Failure on host %s" % host, self.FAILED_VOICE) + self.say(f"Failure on host {host}", 
self.FAILED_VOICE) def runner_on_ok(self, host, res): self.say("pew", self.LASER_VOICE) @@ -89,13 +85,13 @@ class CallbackModule(CallbackBase): self.say("pew", self.LASER_VOICE) def runner_on_unreachable(self, host, res): - self.say("Failure on host %s" % host, self.FAILED_VOICE) + self.say(f"Failure on host {host}", self.FAILED_VOICE) def runner_on_async_ok(self, host, res, jid): self.say("pew", self.LASER_VOICE) def runner_on_async_failed(self, host, res, jid): - self.say("Failure on host %s" % host, self.FAILED_VOICE) + self.say(f"Failure on host {host}", self.FAILED_VOICE) def playbook_on_start(self): self.say("Running Playbook", self.REGULAR_VOICE) @@ -105,15 +101,15 @@ class CallbackModule(CallbackBase): def playbook_on_task_start(self, name, is_conditional): if not is_conditional: - self.say("Starting task: %s" % name, self.REGULAR_VOICE) + self.say(f"Starting task: {name}", self.REGULAR_VOICE) else: - self.say("Notifying task: %s" % name, self.REGULAR_VOICE) + self.say(f"Notifying task: {name}", self.REGULAR_VOICE) def playbook_on_setup(self): self.say("Gathering facts", self.REGULAR_VOICE) def playbook_on_play_start(self, name): - self.say("Starting play: %s" % name, self.HAPPY_VOICE) + self.say(f"Starting play: {name}", self.HAPPY_VOICE) def playbook_on_stats(self, stats): self.say("Play complete", self.HAPPY_VOICE) diff --git a/plugins/callback/selective.py b/plugins/callback/selective.py index 526975bd2c..2a7dd07a3e 100644 --- a/plugins/callback/selective.py +++ b/plugins/callback/selective.py @@ -1,41 +1,39 @@ -# -*- coding: utf-8 -*- # Copyright (c) Fastly, inc 2016 # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: selective - 
type: stdout - requirements: - - set as main display callback - short_description: only print certain tasks - description: - - This callback only prints tasks that have been tagged with C(print_action) or that have failed. - This allows operators to focus on the tasks that provide value only. - - Tasks that are not printed are placed with a C(.). - - If you increase verbosity all tasks are printed. - options: - nocolor: - default: false - description: This setting allows suppressing colorizing output. - env: - - name: ANSIBLE_NOCOLOR - - name: ANSIBLE_SELECTIVE_DONT_COLORIZE - ini: - - section: defaults - key: nocolor - type: boolean -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: selective +type: stdout +requirements: + - set as main display callback +short_description: Only print certain tasks +description: + - This callback only prints tasks that have been tagged with C(print_action) or that have failed. This allows operators + to focus on the tasks that provide value only. + - Tasks that are not printed are placed with a C(.). + - If you increase verbosity all tasks are printed. +options: + nocolor: + default: false + description: This setting allows suppressing colorizing output. 
+ env: + - name: ANSIBLE_NOCOLOR + - name: ANSIBLE_SELECTIVE_DONT_COLORIZE + ini: + - section: defaults + key: nocolor + type: boolean +""" -EXAMPLES = """ - - ansible.builtin.debug: msg="This will not be printed" - - ansible.builtin.debug: msg="But this will" - tags: [print_action] +EXAMPLES = r""" +- ansible.builtin.debug: msg="This will not be printed" +- ansible.builtin.debug: msg="But this will" + tags: [print_action] """ import difflib @@ -44,26 +42,17 @@ from ansible import constants as C from ansible.plugins.callback import CallbackBase from ansible.module_utils.common.text.converters import to_text -try: - codeCodes = C.COLOR_CODES -except AttributeError: - # This constant was moved to ansible.constants in - # https://github.com/ansible/ansible/commit/1202dd000f10b0e8959019484f1c3b3f9628fc67 - # (will be included in ansible-core 2.11.0). For older Ansible/ansible-base versions, - # we include from the original location. - from ansible.utils.color import codeCodes - DONT_COLORIZE = False COLORS = { 'normal': '\033[0m', - 'ok': '\033[{0}m'.format(codeCodes[C.COLOR_OK]), + 'ok': f'\x1b[{C.COLOR_CODES[C.COLOR_OK]}m', 'bold': '\033[1m', 'not_so_bold': '\033[1m\033[34m', - 'changed': '\033[{0}m'.format(codeCodes[C.COLOR_CHANGED]), - 'failed': '\033[{0}m'.format(codeCodes[C.COLOR_ERROR]), + 'changed': f'\x1b[{C.COLOR_CODES[C.COLOR_CHANGED]}m', + 'failed': f'\x1b[{C.COLOR_CODES[C.COLOR_ERROR]}m', 'endc': '\033[0m', - 'skipped': '\033[{0}m'.format(codeCodes[C.COLOR_SKIP]), + 'skipped': f'\x1b[{C.COLOR_CODES[C.COLOR_SKIP]}m', } @@ -82,7 +71,7 @@ def colorize(msg, color): if DONT_COLORIZE: return msg else: - return '{0}{1}{2}'.format(COLORS[color], msg, COLORS['endc']) + return f"{COLORS[color]}{msg}{COLORS['endc']}" class CallbackModule(CallbackBase): @@ -115,15 +104,15 @@ class CallbackModule(CallbackBase): line_length = 120 if self.last_skipped: print() - msg = colorize("# {0} {1}".format(task_name, - '*' * (line_length - len(task_name))), 'bold') + line = f"# 
{task_name} " + msg = colorize(f"{line}{'*' * (line_length - len(line))}", 'bold') print(msg) def _indent_text(self, text, indent_level): lines = text.splitlines() result_lines = [] for l in lines: - result_lines.append("{0}{1}".format(' ' * indent_level, l)) + result_lines.append(f"{' ' * indent_level}{l}") return '\n'.join(result_lines) def _print_diff(self, diff, indent_level): @@ -156,19 +145,19 @@ class CallbackModule(CallbackBase): change_string = colorize('FAILED!!!', color) else: color = 'changed' if changed else 'ok' - change_string = colorize("changed={0}".format(changed), color) + change_string = colorize(f"changed={changed}", color) msg = colorize(msg, color) line_length = 120 spaces = ' ' * (40 - len(name) - indent_level) - line = "{0} * {1}{2}- {3}".format(' ' * indent_level, name, spaces, change_string) + line = f"{' ' * indent_level} * {name}{spaces}- {change_string}" if len(msg) < 50: - line += ' -- {0}'.format(msg) - print("{0} {1}---------".format(line, '-' * (line_length - len(line)))) + line += f' -- {msg}' + print(f"{line} {'-' * (line_length - len(line))}---------") else: - print("{0} {1}".format(line, '-' * (line_length - len(line)))) + print(f"{line} {'-' * (line_length - len(line))}") print(self._indent_text(msg, indent_level + 4)) if diff: @@ -218,7 +207,7 @@ class CallbackModule(CallbackBase): stderr = [r.get('exception', None), r.get('module_stderr', None)] stderr = "\n".join([e for e in stderr if e]).strip() - self._print_host_or_item(r['item'], + self._print_host_or_item(r[r['ansible_loop_var']], r.get('changed', False), to_text(r.get('msg', '')), r.get('diff', None), @@ -248,8 +237,10 @@ class CallbackModule(CallbackBase): else: color = 'ok' - msg = '{0} : ok={1}\tchanged={2}\tfailed={3}\tunreachable={4}\trescued={5}\tignored={6}'.format( - host, s['ok'], s['changed'], s['failures'], s['unreachable'], s['rescued'], s['ignored']) + msg = ( + f"{host} : ok={s['ok']}\tchanged={s['changed']}\tfailed={s['failures']}\tunreachable=" + 
f"{s['unreachable']}\trescued={s['rescued']}\tignored={s['ignored']}" + ) print(colorize(msg, color)) def v2_runner_on_skipped(self, result, **kwargs): @@ -261,17 +252,15 @@ class CallbackModule(CallbackBase): line_length = 120 spaces = ' ' * (31 - len(result._host.name) - 4) - line = " * {0}{1}- {2}".format(colorize(result._host.name, 'not_so_bold'), - spaces, - colorize("skipped", 'skipped'),) + line = f" * {colorize(result._host.name, 'not_so_bold')}{spaces}- {colorize('skipped', 'skipped')}" reason = result._result.get('skipped_reason', '') or \ result._result.get('skip_reason', '') if len(reason) < 50: - line += ' -- {0}'.format(reason) - print("{0} {1}---------".format(line, '-' * (line_length - len(line)))) + line += f' -- {reason}' + print(f"{line} {'-' * (line_length - len(line))}---------") else: - print("{0} {1}".format(line, '-' * (line_length - len(line)))) + print(f"{line} {'-' * (line_length - len(line))}") print(self._indent_text(reason, 8)) print(reason) diff --git a/plugins/callback/slack.py b/plugins/callback/slack.py index e9b84bbb38..e1d95abe06 100644 --- a/plugins/callback/slack.py +++ b/plugins/callback/slack.py @@ -1,66 +1,70 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2014-2015, Matt Martz # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: slack - type: notification - requirements: - - whitelist in configuration - - prettytable (python library) - short_description: Sends play events to a Slack channel +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: slack +type: notification +requirements: + - whitelist in configuration + - prettytable (python library) +short_description: Sends 
play events to a Slack channel +description: + - This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution. +options: + http_agent: description: - - This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution. - - Before Ansible 2.4 only environment variables were available for configuring this plugin. - options: - webhook_url: - required: true - description: Slack Webhook URL. - env: - - name: SLACK_WEBHOOK_URL - ini: - - section: callback_slack - key: webhook_url - channel: - default: "#ansible" - description: Slack room to post in. - env: - - name: SLACK_CHANNEL - ini: - - section: callback_slack - key: channel - username: - description: Username to post as. - env: - - name: SLACK_USERNAME - default: ansible - ini: - - section: callback_slack - key: username - validate_certs: - description: Validate the SSL certificate of the Slack server for HTTPS URLs. - env: - - name: SLACK_VALIDATE_CERTS - ini: - - section: callback_slack - key: validate_certs - default: true - type: bool -''' + - HTTP user agent to use for requests to Slack. + type: string + version_added: "10.5.0" + webhook_url: + required: true + description: Slack Webhook URL. + type: str + env: + - name: SLACK_WEBHOOK_URL + ini: + - section: callback_slack + key: webhook_url + channel: + default: "#ansible" + description: Slack room to post in. + type: str + env: + - name: SLACK_CHANNEL + ini: + - section: callback_slack + key: channel + username: + description: Username to post as. + type: str + env: + - name: SLACK_USERNAME + default: ansible + ini: + - section: callback_slack + key: username + validate_certs: + description: Validate the SSL certificate of the Slack server for HTTPS URLs. 
+ env: + - name: SLACK_VALIDATE_CERTS + ini: + - section: callback_slack + key: validate_certs + default: true + type: bool +""" import json import os import uuid from ansible import context -from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.urls import open_url from ansible.plugins.callback import CallbackBase @@ -106,7 +110,7 @@ class CallbackModule(CallbackBase): self.username = self.get_option('username') self.show_invocation = (self._display.verbosity > 1) self.validate_certs = self.get_option('validate_certs') - + self.http_agent = self.get_option('http_agent') if self.webhook_url is None: self.disabled = True self._display.warning('Slack Webhook URL was not provided. The ' @@ -132,18 +136,22 @@ class CallbackModule(CallbackBase): self._display.debug(data) self._display.debug(self.webhook_url) try: - response = open_url(self.webhook_url, data=data, validate_certs=self.validate_certs, - headers=headers) + response = open_url( + self.webhook_url, + data=data, + validate_certs=self.validate_certs, + headers=headers, + http_agent=self.http_agent, + ) return response.read() except Exception as e: - self._display.warning(u'Could not submit message to Slack: %s' % - to_text(e)) + self._display.warning(f'Could not submit message to Slack: {e}') def v2_playbook_on_start(self, playbook): self.playbook_name = os.path.basename(playbook._file_name) title = [ - '*Playbook initiated* (_%s_)' % self.guid + f'*Playbook initiated* (_{self.guid}_)' ] invocation_items = [] @@ -154,23 +162,23 @@ class CallbackModule(CallbackBase): subset = context.CLIARGS['subset'] inventory = [os.path.abspath(i) for i in context.CLIARGS['inventory']] - invocation_items.append('Inventory: %s' % ', '.join(inventory)) + invocation_items.append(f"Inventory: {', '.join(inventory)}") if tags and tags != ['all']: - invocation_items.append('Tags: %s' % ', '.join(tags)) + invocation_items.append(f"Tags: {', '.join(tags)}") if skip_tags: - 
invocation_items.append('Skip Tags: %s' % ', '.join(skip_tags)) + invocation_items.append(f"Skip Tags: {', '.join(skip_tags)}") if subset: - invocation_items.append('Limit: %s' % subset) + invocation_items.append(f'Limit: {subset}') if extra_vars: - invocation_items.append('Extra Vars: %s' % - ' '.join(extra_vars)) + invocation_items.append(f"Extra Vars: {' '.join(extra_vars)}") - title.append('by *%s*' % context.CLIARGS['remote_user']) + title.append(f"by *{context.CLIARGS['remote_user']}*") - title.append('\n\n*%s*' % self.playbook_name) + title.append(f'\n\n*{self.playbook_name}*') msg_items = [' '.join(title)] if invocation_items: - msg_items.append('```\n%s\n```' % '\n'.join(invocation_items)) + _inv_item = '\n'.join(invocation_items) + msg_items.append(f'```\n{_inv_item}\n```') msg = '\n'.join(msg_items) @@ -190,8 +198,8 @@ class CallbackModule(CallbackBase): def v2_playbook_on_play_start(self, play): """Display Play start messages""" - name = play.name or 'Play name not specified (%s)' % play._uuid - msg = '*Starting play* (_%s_)\n\n*%s*' % (self.guid, name) + name = play.name or f'Play name not specified ({play._uuid})' + msg = f'*Starting play* (_{self.guid}_)\n\n*{name}*' attachments = [ { 'fallback': msg, @@ -226,7 +234,7 @@ class CallbackModule(CallbackBase): attachments = [] msg_items = [ - '*Playbook Complete* (_%s_)' % self.guid + f'*Playbook Complete* (_{self.guid}_)' ] if failures or unreachable: color = 'danger' @@ -235,7 +243,7 @@ class CallbackModule(CallbackBase): color = 'good' msg_items.append('\n*Success!*') - msg_items.append('```\n%s\n```' % t) + msg_items.append(f'```\n{t}\n```') msg = '\n'.join(msg_items) diff --git a/plugins/callback/splunk.py b/plugins/callback/splunk.py index 67ad944d2e..635a3109bc 100644 --- a/plugins/callback/splunk.py +++ b/plugins/callback/splunk.py @@ -1,76 +1,76 @@ -# -*- coding: utf-8 -*- # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: splunk - type: notification - short_description: Sends task result events to Splunk HTTP Event Collector - author: "Stuart Hirst (!UNKNOWN) " +DOCUMENTATION = r""" +name: splunk +type: notification +short_description: Sends task result events to Splunk HTTP Event Collector +author: "Stuart Hirst (!UNKNOWN) " +description: + - This callback plugin sends task results as JSON formatted events to a Splunk HTTP collector. + - The companion Splunk Monitoring & Diagnostics App is available here U(https://splunkbase.splunk.com/app/4023/). + - Credit to "Ryan Currah (@ryancurrah)" for original source upon which this is based. +requirements: + - Whitelisting this callback plugin + - 'Create a HTTP Event Collector in Splunk' + - 'Define the URL and token in C(ansible.cfg)' +options: + url: + description: URL to the Splunk HTTP collector source. + type: str + env: + - name: SPLUNK_URL + ini: + - section: callback_splunk + key: url + authtoken: + description: Token to authenticate the connection to the Splunk HTTP collector. + type: str + env: + - name: SPLUNK_AUTHTOKEN + ini: + - section: callback_splunk + key: authtoken + validate_certs: + description: Whether to validate certificates for connections to HEC. It is not recommended to set to V(false) except + when you are sure that nobody can intercept the connection between this plugin and HEC, as setting it to V(false) allows + man-in-the-middle attacks! + env: + - name: SPLUNK_VALIDATE_CERTS + ini: + - section: callback_splunk + key: validate_certs + type: bool + default: true + version_added: '1.0.0' + include_milliseconds: + description: Whether to include milliseconds as part of the generated timestamp field in the event sent to the Splunk + HTTP collector. 
+ env: + - name: SPLUNK_INCLUDE_MILLISECONDS + ini: + - section: callback_splunk + key: include_milliseconds + type: bool + default: false + version_added: 2.0.0 + batch: description: - - This callback plugin will send task results as JSON formatted events to a Splunk HTTP collector. - - The companion Splunk Monitoring & Diagnostics App is available here U(https://splunkbase.splunk.com/app/4023/). - - Credit to "Ryan Currah (@ryancurrah)" for original source upon which this is based. - requirements: - - Whitelisting this callback plugin - - 'Create a HTTP Event Collector in Splunk' - - 'Define the URL and token in C(ansible.cfg)' - options: - url: - description: URL to the Splunk HTTP collector source. - env: - - name: SPLUNK_URL - ini: - - section: callback_splunk - key: url - authtoken: - description: Token to authenticate the connection to the Splunk HTTP collector. - env: - - name: SPLUNK_AUTHTOKEN - ini: - - section: callback_splunk - key: authtoken - validate_certs: - description: Whether to validate certificates for connections to HEC. It is not recommended to set to - C(false) except when you are sure that nobody can intercept the connection - between this plugin and HEC, as setting it to C(false) allows man-in-the-middle attacks! - env: - - name: SPLUNK_VALIDATE_CERTS - ini: - - section: callback_splunk - key: validate_certs - type: bool - default: true - version_added: '1.0.0' - include_milliseconds: - description: Whether to include milliseconds as part of the generated timestamp field in the event - sent to the Splunk HTTP collector. - env: - - name: SPLUNK_INCLUDE_MILLISECONDS - ini: - - section: callback_splunk - key: include_milliseconds - type: bool - default: false - version_added: 2.0.0 - batch: - description: - - Correlation ID which can be set across multiple playbook executions. 
- env: - - name: SPLUNK_BATCH - ini: - - section: callback_splunk - key: batch - type: str - version_added: 3.3.0 -''' + - Correlation ID which can be set across multiple playbook executions. + env: + - name: SPLUNK_BATCH + ini: + - section: callback_splunk + key: batch + type: str + version_added: 3.3.0 +""" -EXAMPLES = ''' -examples: > +EXAMPLES = r""" +examples: >- To enable, add this to your ansible.cfg file in the defaults block [defaults] callback_whitelist = community.general.splunk @@ -81,26 +81,29 @@ examples: > [callback_splunk] url = http://mysplunkinstance.datapaas.io:8088/services/collector/event authtoken = f23blad6-5965-4537-bf69-5b5a545blabla88 -''' +""" import json import uuid import socket import getpass -from datetime import datetime from os.path import basename +from ansible.module_utils.ansible_release import __version__ as ansible_version from ansible.module_utils.urls import open_url from ansible.parsing.ajson import AnsibleJSONEncoder from ansible.plugins.callback import CallbackBase +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + class SplunkHTTPCollectorSource(object): def __init__(self): self.ansible_check_mode = False self.ansible_playbook = "" - self.ansible_version = "" self.session = str(uuid.uuid4()) self.host = socket.gethostname() self.ip_address = socket.gethostbyname(socket.gethostname()) @@ -110,10 +113,6 @@ class SplunkHTTPCollectorSource(object): if result._task_fields['args'].get('_ansible_check_mode') is True: self.ansible_check_mode = True - if result._task_fields['args'].get('_ansible_version'): - self.ansible_version = \ - result._task_fields['args'].get('_ansible_version') - if result._task._role: ansible_role = str(result._task._role) else: @@ -134,12 +133,12 @@ class SplunkHTTPCollectorSource(object): else: time_format = '%Y-%m-%d %H:%M:%S +0000' - data['timestamp'] = datetime.utcnow().strftime(time_format) + data['timestamp'] = now().strftime(time_format) data['host'] = 
self.host data['ip_address'] = self.ip_address data['user'] = self.user data['runtime'] = runtime - data['ansible_version'] = self.ansible_version + data['ansible_version'] = ansible_version data['ansible_check_mode'] = self.ansible_check_mode data['ansible_host'] = result._host.name data['ansible_playbook'] = self.ansible_playbook @@ -148,15 +147,14 @@ class SplunkHTTPCollectorSource(object): data['ansible_result'] = result._result # This wraps the json payload in and outer json event needed by Splunk - jsondata = json.dumps(data, cls=AnsibleJSONEncoder, sort_keys=True) - jsondata = '{"event":' + jsondata + "}" + jsondata = json.dumps({"event": data}, cls=AnsibleJSONEncoder, sort_keys=True) open_url( url, jsondata, headers={ 'Content-type': 'application/json', - 'Authorization': 'Splunk ' + authtoken + 'Authorization': f"Splunk {authtoken}" }, method='POST', validate_certs=validate_certs @@ -181,7 +179,7 @@ class CallbackModule(CallbackBase): def _runtime(self, result): return ( - datetime.utcnow() - + now() - self.start_datetimes[result._task._uuid] ).total_seconds() @@ -220,10 +218,10 @@ class CallbackModule(CallbackBase): self.splunk.ansible_playbook = basename(playbook._file_name) def v2_playbook_on_task_start(self, task, is_conditional): - self.start_datetimes[task._uuid] = datetime.utcnow() + self.start_datetimes[task._uuid] = now() def v2_playbook_on_handler_task_start(self, task): - self.start_datetimes[task._uuid] = datetime.utcnow() + self.start_datetimes[task._uuid] = now() def v2_runner_on_ok(self, result, **kwargs): self.splunk.send_event( diff --git a/plugins/callback/sumologic.py b/plugins/callback/sumologic.py index 998081c35b..3f99bf216a 100644 --- a/plugins/callback/sumologic.py +++ b/plugins/callback/sumologic.py @@ -1,34 +1,33 @@ -# -*- coding: utf-8 -*- # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later 
-from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" name: sumologic type: notification short_description: Sends task result events to Sumologic author: "Ryan Currah (@ryancurrah)" description: - - This callback plugin will send task results as JSON formatted events to a Sumologic HTTP collector source. + - This callback plugin sends task results as JSON formatted events to a Sumologic HTTP collector source. requirements: - Whitelisting this callback plugin - - 'Create a HTTP collector source in Sumologic and specify a custom timestamp format of C(yyyy-MM-dd HH:mm:ss ZZZZ) and a custom timestamp locator - of C("timestamp": "(.*)")' + - 'Create a HTTP collector source in Sumologic and specify a custom timestamp format of V(yyyy-MM-dd HH:mm:ss ZZZZ) and + a custom timestamp locator of V("timestamp": "(.*\)")' options: url: description: URL to the Sumologic HTTP collector source. 
+ type: str env: - name: SUMOLOGIC_URL ini: - section: callback_sumologic key: url -''' +""" -EXAMPLES = ''' -examples: | +EXAMPLES = r""" +examples: |- To enable, add this to your ansible.cfg file in the defaults block [defaults] callback_whitelist = community.general.sumologic @@ -39,26 +38,29 @@ examples: | Set the ansible.cfg variable in the callback_sumologic block [callback_sumologic] url = https://endpoint1.collection.us2.sumologic.com/receiver/v1/http/R8moSv1d8EW9LAUFZJ6dbxCFxwLH6kfCdcBfddlfxCbLuL-BN5twcTpMk__pYy_cDmp== -''' +""" import json import uuid import socket import getpass -from datetime import datetime from os.path import basename +from ansible.module_utils.ansible_release import __version__ as ansible_version from ansible.module_utils.urls import open_url from ansible.parsing.ajson import AnsibleJSONEncoder from ansible.plugins.callback import CallbackBase +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + class SumologicHTTPCollectorSource(object): def __init__(self): self.ansible_check_mode = False self.ansible_playbook = "" - self.ansible_version = "" self.session = str(uuid.uuid4()) self.host = socket.gethostname() self.ip_address = socket.gethostbyname(socket.gethostname()) @@ -68,10 +70,6 @@ class SumologicHTTPCollectorSource(object): if result._task_fields['args'].get('_ansible_check_mode') is True: self.ansible_check_mode = True - if result._task_fields['args'].get('_ansible_version'): - self.ansible_version = \ - result._task_fields['args'].get('_ansible_version') - if result._task._role: ansible_role = str(result._task._role) else: @@ -84,13 +82,12 @@ class SumologicHTTPCollectorSource(object): data['uuid'] = result._task._uuid data['session'] = self.session data['status'] = state - data['timestamp'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S ' - '+0000') + data['timestamp'] = now().strftime('%Y-%m-%d %H:%M:%S +0000') data['host'] = self.host data['ip_address'] = self.ip_address 
data['user'] = self.user data['runtime'] = runtime - data['ansible_version'] = self.ansible_version + data['ansible_version'] = ansible_version data['ansible_check_mode'] = self.ansible_check_mode data['ansible_host'] = result._host.name data['ansible_playbook'] = self.ansible_playbook @@ -123,7 +120,7 @@ class CallbackModule(CallbackBase): def _runtime(self, result): return ( - datetime.utcnow() - + now() - self.start_datetimes[result._task._uuid] ).total_seconds() @@ -144,10 +141,10 @@ class CallbackModule(CallbackBase): self.sumologic.ansible_playbook = basename(playbook._file_name) def v2_playbook_on_task_start(self, task, is_conditional): - self.start_datetimes[task._uuid] = datetime.utcnow() + self.start_datetimes[task._uuid] = now() def v2_playbook_on_handler_task_start(self, task): - self.start_datetimes[task._uuid] = datetime.utcnow() + self.start_datetimes[task._uuid] = now() def v2_runner_on_ok(self, result, **kwargs): self.sumologic.send_event( diff --git a/plugins/callback/syslog_json.py b/plugins/callback/syslog_json.py index 2bd8f6e604..657ca017f6 100644 --- a/plugins/callback/syslog_json.py +++ b/plugins/callback/syslog_json.py @@ -1,61 +1,58 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: syslog_json - type: notification - requirements: - - whitelist in configuration - short_description: sends JSON events to syslog - description: - - This plugin logs ansible-playbook and ansible runs to a syslog server in JSON format. - - Before Ansible 2.9 only environment variables were available for configuration. 
- options: - server: - description: Syslog server that will receive the event. - env: - - name: SYSLOG_SERVER - default: localhost - ini: - - section: callback_syslog_json - key: syslog_server - port: - description: Port on which the syslog server is listening. - env: - - name: SYSLOG_PORT - default: 514 - ini: - - section: callback_syslog_json - key: syslog_port - facility: - description: Syslog facility to log as. - env: - - name: SYSLOG_FACILITY - default: user - ini: - - section: callback_syslog_json - key: syslog_facility - setup: - description: Log setup tasks. - env: - - name: ANSIBLE_SYSLOG_SETUP - type: bool - default: true - ini: - - section: callback_syslog_json - key: syslog_setup - version_added: 4.5.0 -''' - -import os -import json +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: syslog_json +type: notification +requirements: + - whitelist in configuration +short_description: Sends JSON events to syslog +description: + - This plugin logs ansible-playbook and ansible runs to a syslog server in JSON format. +options: + server: + description: Syslog server that receives the event. + type: str + env: + - name: SYSLOG_SERVER + default: localhost + ini: + - section: callback_syslog_json + key: syslog_server + port: + description: Port on which the syslog server is listening. + type: int + env: + - name: SYSLOG_PORT + default: 514 + ini: + - section: callback_syslog_json + key: syslog_port + facility: + description: Syslog facility to log as. + type: str + env: + - name: SYSLOG_FACILITY + default: user + ini: + - section: callback_syslog_json + key: syslog_facility + setup: + description: Log setup tasks. 
+ env: + - name: ANSIBLE_SYSLOG_SETUP + type: bool + default: true + ini: + - section: callback_syslog_json + key: syslog_setup + version_added: 4.5.0 +""" import logging import logging.handlers diff --git a/plugins/callback/tasks_only.py b/plugins/callback/tasks_only.py new file mode 100644 index 0000000000..3de81fc2db --- /dev/null +++ b/plugins/callback/tasks_only.py @@ -0,0 +1,68 @@ + +# Copyright (c) 2025, Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +author: Felix Fontein (@felixfontein) +name: tasks_only +type: stdout +version_added: 11.1.0 +short_description: Only show tasks +description: + - Removes play start and stats marker from P(ansible.builtin.default#callback)'s output. + - Can be used to generate output for documentation examples. + For this, the O(number_of_columns) option should be set to an explicit value. +extends_documentation_fragment: + - ansible.builtin.default_callback + - ansible.builtin.result_format_callback +options: + number_of_columns: + description: + - Sets the number of columns for Ansible's display. 
+ type: int + env: + - name: ANSIBLE_COLLECTIONS_TASKS_ONLY_NUMBER_OF_COLUMNS + result_format: + # Part of the ansible.builtin.result_format_callback doc fragment + version_added: 11.2.0 + pretty_results: + # Part of the ansible.builtin.result_format_callback doc fragment + version_added: 11.2.0 +""" + +EXAMPLES = r""" +--- +# Enable callback in ansible.cfg: +ansible_config: |- + [defaults] + stdout_callback = community.general.tasks_only + +--- +# Enable callback with environment variables: +environment_variable: |- + ANSIBLE_STDOUT_CALLBACK=community.general.tasks_only +""" + +from ansible.plugins.callback.default import CallbackModule as Default + + +class CallbackModule(Default): + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = 'stdout' + CALLBACK_NAME = 'community.general.tasks_only' + + def v2_playbook_on_play_start(self, play): + pass + + def v2_playbook_on_stats(self, stats): + pass + + def set_options(self, *args, **kwargs): + result = super(CallbackModule, self).set_options(*args, **kwargs) + self.number_of_columns = self.get_option("number_of_columns") + if self.number_of_columns is not None: + self._display.columns = self.number_of_columns + return result diff --git a/plugins/callback/timestamp.py b/plugins/callback/timestamp.py new file mode 100644 index 0000000000..f733fa8cb7 --- /dev/null +++ b/plugins/callback/timestamp.py @@ -0,0 +1,124 @@ + +# Copyright (c) 2024, kurokobo +# Copyright (c) 2014, Michael DeHaan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +name: timestamp +type: stdout +short_description: Adds simple timestamp for each header +version_added: 9.0.0 +description: + - This callback adds simple timestamp for each header. +author: kurokobo (@kurokobo) +options: + timezone: + description: + - Timezone to use for the timestamp in IANA time zone format. 
+ - For example V(America/New_York), V(Asia/Tokyo)). Ignored on Python < 3.9. + ini: + - section: callback_timestamp + key: timezone + env: + - name: ANSIBLE_CALLBACK_TIMESTAMP_TIMEZONE + type: string + format_string: + description: + - Format of the timestamp shown to user in 1989 C standard format. + - Refer to L(the Python documentation,https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes) + for the available format codes. + ini: + - section: callback_timestamp + key: format_string + env: + - name: ANSIBLE_CALLBACK_TIMESTAMP_FORMAT_STRING + default: "%H:%M:%S" + type: string +seealso: + - plugin: ansible.posix.profile_tasks + plugin_type: callback + description: >- + You can use P(ansible.posix.profile_tasks#callback) callback plugin to time individual tasks and overall execution time + with detailed timestamps. +extends_documentation_fragment: + - ansible.builtin.default_callback + - ansible.builtin.result_format_callback +""" + + +from ansible.plugins.callback.default import CallbackModule as Default +from ansible.utils.display import get_text_width +from ansible.module_utils.common.text.converters import to_text +from datetime import datetime +import types +import sys + +# Store whether the zoneinfo module is available +_ZONEINFO_AVAILABLE = sys.version_info >= (3, 9) + + +def get_datetime_now(tz): + """ + Returns the current timestamp with the specified timezone + """ + return datetime.now(tz=tz) + + +def banner(self, msg, color=None, cows=True): + """ + Prints a header-looking line with cowsay or stars with length depending on terminal width (3 minimum) with trailing timestamp + + Based on the banner method of Display class from ansible.utils.display + + https://github.com/ansible/ansible/blob/4403519afe89138042108e237aef317fd5f09c33/lib/ansible/utils/display.py#L511 + """ + timestamp = get_datetime_now(self.timestamp_tzinfo).strftime(self.timestamp_format_string) + timestamp_len = get_text_width(timestamp) + 1 # +1 for leading 
space + + msg = to_text(msg) + if self.b_cowsay and cows: + try: + self.banner_cowsay(f"{msg} @ {timestamp}") + return + except OSError: + self.warning("somebody cleverly deleted cowsay or something during the PB run. heh.") + + msg = msg.strip() + try: + star_len = self.columns - get_text_width(msg) - timestamp_len + except EnvironmentError: + star_len = self.columns - len(msg) - timestamp_len + if star_len <= 3: + star_len = 3 + stars = "*" * star_len + self.display(f"\n{msg} {stars} {timestamp}", color=color) + + +class CallbackModule(Default): + CALLBACK_VERSION = 2.0 + CALLBACK_TYPE = "stdout" + CALLBACK_NAME = "community.general.timestamp" + + def __init__(self): + super(CallbackModule, self).__init__() + + # Replace the banner method of the display object with the custom one + self._display.banner = types.MethodType(banner, self._display) + + def set_options(self, task_keys=None, var_options=None, direct=None): + super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) + + # Store zoneinfo for specified timezone if available + tzinfo = None + if _ZONEINFO_AVAILABLE and self.get_option("timezone"): + from zoneinfo import ZoneInfo + + tzinfo = ZoneInfo(self.get_option("timezone")) + + # Inject options into the display object + setattr(self._display, "timestamp_tzinfo", tzinfo) + setattr(self._display, "timestamp_format_string", self.get_option("format_string")) diff --git a/plugins/callback/unixy.py b/plugins/callback/unixy.py index 02a2e46ba6..d155aefc66 100644 --- a/plugins/callback/unixy.py +++ b/plugins/callback/unixy.py @@ -1,25 +1,23 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2017, Allyson Bowles <@akatch> +# Copyright (c) 2023, Al Bowles <@akatch> # Copyright (c) 2012-2014, Michael DeHaan # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import 
(absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: unixy - type: stdout - author: Allyson Bowles (@akatch) - short_description: condensed Ansible output - description: - - Consolidated Ansible output in the style of LINUX/UNIX startup logs. - extends_documentation_fragment: - - default_callback - requirements: - - set as stdout in configuration -''' +DOCUMENTATION = r""" +name: unixy +type: stdout +author: Al Bowles (@akatch) +short_description: Condensed Ansible output +description: + - Consolidated Ansible output in the style of LINUX/UNIX startup logs. +extends_documentation_fragment: + - default_callback +requirements: + - set as stdout in configuration +""" from os.path import basename from ansible import constants as C @@ -40,7 +38,6 @@ class CallbackModule(CallbackModule_default): - Only display task names if the task runs on at least one host - Add option to display all hostnames on a single line in the appropriate result color (failures may have a separate line) - Consolidate stats display - - Display whether run is in --check mode - Don't show play name if no hosts found ''' @@ -68,43 +65,55 @@ class CallbackModule(CallbackModule_default): def _process_result_output(self, result, msg): task_host = result._host.get_name() - task_result = "%s %s" % (task_host, msg) + task_result = f"{task_host} {msg}" if self._run_is_verbose(result): - task_result = "%s %s: %s" % (task_host, msg, self._dump_results(result._result, indent=4)) + task_result = f"{task_host} {msg}: {self._dump_results(result._result, indent=4)}" return task_result if self.delegated_vars: task_delegate_host = self.delegated_vars['ansible_host'] - task_result = "%s -> %s %s" % (task_host, task_delegate_host, msg) + task_result = f"{task_host} -> {task_delegate_host} {msg}" if result._result.get('msg') and result._result.get('msg') != "All items completed": - task_result += " | msg: " + 
to_text(result._result.get('msg')) + task_result += f" | msg: {to_text(result._result.get('msg'))}" if result._result.get('stdout'): - task_result += " | stdout: " + result._result.get('stdout') + task_result += f" | stdout: {result._result.get('stdout')}" if result._result.get('stderr'): - task_result += " | stderr: " + result._result.get('stderr') + task_result += f" | stderr: {result._result.get('stderr')}" return task_result def v2_playbook_on_task_start(self, task, is_conditional): self._get_task_display_name(task) if self.task_display_name is not None: - self._display.display("%s..." % self.task_display_name) + if task.check_mode and self.get_option('check_mode_markers'): + self._display.display(f"{self.task_display_name} (check mode)...") + else: + self._display.display(f"{self.task_display_name}...") def v2_playbook_on_handler_task_start(self, task): self._get_task_display_name(task) if self.task_display_name is not None: - self._display.display("%s (via handler)... " % self.task_display_name) + if task.check_mode and self.get_option('check_mode_markers'): + self._display.display(f"{self.task_display_name} (via handler in check mode)... ") + else: + self._display.display(f"{self.task_display_name} (via handler)... 
") def v2_playbook_on_play_start(self, play): name = play.get_name().strip() - if name and play.hosts: - msg = u"\n- %s on hosts: %s -" % (name, ",".join(play.hosts)) + if play.check_mode and self.get_option('check_mode_markers'): + if name and play.hosts: + msg = f"\n- {name} (in check mode) on hosts: {','.join(play.hosts)} -" + else: + msg = "- check mode -" else: - msg = u"---" + if name and play.hosts: + msg = f"\n- {name} on hosts: {','.join(play.hosts)} -" + else: + msg = "---" self._display.display(msg) @@ -115,7 +124,7 @@ class CallbackModule(CallbackModule_default): msg = "skipped" task_result = self._process_result_output(result, msg) - self._display.display(" " + task_result, display_color) + self._display.display(f" {task_result}", display_color) else: return @@ -125,10 +134,10 @@ class CallbackModule(CallbackModule_default): msg = "failed" item_value = self._get_item_label(result._result) if item_value: - msg += " | item: %s" % (item_value,) + msg += f" | item: {item_value}" task_result = self._process_result_output(result, msg) - self._display.display(" " + task_result, display_color, stderr=self.get_option('display_failed_stderr')) + self._display.display(f" {task_result}", display_color, stderr=self.get_option('display_failed_stderr')) def v2_runner_on_ok(self, result, msg="ok", display_color=C.COLOR_OK): self._preprocess_result(result) @@ -138,13 +147,13 @@ class CallbackModule(CallbackModule_default): msg = "done" item_value = self._get_item_label(result._result) if item_value: - msg += " | item: %s" % (item_value,) + msg += f" | item: {item_value}" display_color = C.COLOR_CHANGED task_result = self._process_result_output(result, msg) - self._display.display(" " + task_result, display_color) + self._display.display(f" {task_result}", display_color) elif self.get_option('display_ok_hosts'): task_result = self._process_result_output(result, msg) - self._display.display(" " + task_result, display_color) + self._display.display(f" {task_result}", 
display_color) def v2_runner_item_on_skipped(self, result): self.v2_runner_on_skipped(result) @@ -162,7 +171,7 @@ class CallbackModule(CallbackModule_default): display_color = C.COLOR_UNREACHABLE task_result = self._process_result_output(result, msg) - self._display.display(" " + task_result, display_color, stderr=self.get_option('display_failed_stderr')) + self._display.display(f" {task_result}", display_color, stderr=self.get_option('display_failed_stderr')) def v2_on_file_diff(self, result): if result._task.loop and 'results' in result._result: @@ -184,25 +193,17 @@ class CallbackModule(CallbackModule_default): # TODO how else can we display these? t = stats.summarize(h) - self._display.display(u" %s : %s %s %s %s %s %s" % ( - hostcolor(h, t), - colorize(u'ok', t['ok'], C.COLOR_OK), - colorize(u'changed', t['changed'], C.COLOR_CHANGED), - colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE), - colorize(u'failed', t['failures'], C.COLOR_ERROR), - colorize(u'rescued', t['rescued'], C.COLOR_OK), - colorize(u'ignored', t['ignored'], C.COLOR_WARN)), + self._display.display( + f" {hostcolor(h, t)} : {colorize('ok', t['ok'], C.COLOR_OK)} {colorize('changed', t['changed'], C.COLOR_CHANGED)} " + f"{colorize('unreachable', t['unreachable'], C.COLOR_UNREACHABLE)} {colorize('failed', t['failures'], C.COLOR_ERROR)} " + f"{colorize('rescued', t['rescued'], C.COLOR_OK)} {colorize('ignored', t['ignored'], C.COLOR_WARN)}", screen_only=True ) - self._display.display(u" %s : %s %s %s %s %s %s" % ( - hostcolor(h, t, False), - colorize(u'ok', t['ok'], None), - colorize(u'changed', t['changed'], None), - colorize(u'unreachable', t['unreachable'], None), - colorize(u'failed', t['failures'], None), - colorize(u'rescued', t['rescued'], None), - colorize(u'ignored', t['ignored'], None)), + self._display.display( + f" {hostcolor(h, t, False)} : {colorize('ok', t['ok'], None)} {colorize('changed', t['changed'], None)} " + f"{colorize('unreachable', t['unreachable'], None)} 
{colorize('failed', t['failures'], None)} {colorize('rescued', t['rescued'], None)} " + f"{colorize('ignored', t['ignored'], None)}", log_only=True ) if stats.custom and self.get_option('show_custom_stats'): @@ -212,12 +213,14 @@ class CallbackModule(CallbackModule_default): for k in sorted(stats.custom.keys()): if k == '_run': continue - self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', ''))) + stat_val = self._dump_results(stats.custom[k], indent=1).replace('\n', '') + self._display.display(f'\t{k}: {stat_val}') # print per run custom stats if '_run' in stats.custom: self._display.display("", screen_only=True) - self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', '')) + stat_val_run = self._dump_results(stats.custom['_run'], indent=1).replace('\n', '') + self._display.display(f'\tRUN: {stat_val_run}') self._display.display("", screen_only=True) def v2_playbook_on_no_hosts_matched(self): @@ -227,22 +230,24 @@ class CallbackModule(CallbackModule_default): self._display.display(" Ran out of hosts!", color=C.COLOR_ERROR) def v2_playbook_on_start(self, playbook): - # TODO display whether this run is happening in check mode - self._display.display("Executing playbook %s" % basename(playbook._file_name)) + if context.CLIARGS['check'] and self.get_option('check_mode_markers'): + self._display.display(f"Executing playbook {basename(playbook._file_name)} in check mode") + else: + self._display.display(f"Executing playbook {basename(playbook._file_name)}") # show CLI arguments if self._display.verbosity > 3: if context.CLIARGS.get('args'): - self._display.display('Positional arguments: %s' % ' '.join(context.CLIARGS['args']), + self._display.display(f"Positional arguments: {' '.join(context.CLIARGS['args'])}", color=C.COLOR_VERBOSE, screen_only=True) for argument in (a for a in context.CLIARGS if a != 'args'): val = context.CLIARGS[argument] if val: - self._display.vvvv('%s: 
%s' % (argument, val)) + self._display.vvvv(f'{argument}: {val}') def v2_runner_retry(self, result): - msg = " Retrying... (%d of %d)" % (result._result['attempts'], result._result['retries']) + msg = f" Retrying... ({result._result['attempts']} of {result._result['retries']})" if self._run_is_verbose(result): - msg += "Result was: %s" % self._dump_results(result._result) + msg += f"Result was: {self._dump_results(result._result)}" self._display.display(msg, color=C.COLOR_DEBUG) diff --git a/plugins/callback/yaml.py b/plugins/callback/yaml.py deleted file mode 100644 index 73782de151..0000000000 --- a/plugins/callback/yaml.py +++ /dev/null @@ -1,132 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: yaml - type: stdout - short_description: YAML-ized Ansible screen output - description: - - Ansible output that can be quite a bit easier to read than the - default JSON formatting. 
- extends_documentation_fragment: - - default_callback - requirements: - - set as stdout in configuration -''' - -import yaml -import json -import re -import string -import sys - -from ansible.module_utils.common.text.converters import to_bytes, to_text -from ansible.module_utils.six import string_types -from ansible.parsing.yaml.dumper import AnsibleDumper -from ansible.plugins.callback import CallbackBase, strip_internal_keys, module_response_deepcopy -from ansible.plugins.callback.default import CallbackModule as Default - - -# from http://stackoverflow.com/a/15423007/115478 -def should_use_block(value): - """Returns true if string should be in block format""" - for c in u"\u000a\u000d\u001c\u001d\u001e\u0085\u2028\u2029": - if c in value: - return True - return False - - -class MyDumper(AnsibleDumper): - def represent_scalar(self, tag, value, style=None): - """Uses block style for multi-line strings""" - if style is None: - if should_use_block(value): - style = '|' - # we care more about readable than accuracy, so... - # ...no trailing space - value = value.rstrip() - # ...and non-printable characters - value = ''.join(x for x in value if x in string.printable or ord(x) >= 0xA0) - # ...tabs prevent blocks from expanding - value = value.expandtabs() - # ...and odd bits of whitespace - value = re.sub(r'[\x0b\x0c\r]', '', value) - # ...as does trailing space - value = re.sub(r' +\n', '\n', value) - else: - style = self.default_style - node = yaml.representer.ScalarNode(tag, value, style=style) - if self.alias_key is not None: - self.represented_objects[self.alias_key] = node - return node - - -class CallbackModule(Default): - - """ - Variation of the Default output which uses nicely readable YAML instead - of JSON for printing results. 
- """ - - CALLBACK_VERSION = 2.0 - CALLBACK_TYPE = 'stdout' - CALLBACK_NAME = 'community.general.yaml' - - def __init__(self): - super(CallbackModule, self).__init__() - - def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False): - if result.get('_ansible_no_log', False): - return json.dumps(dict(censored="The output has been hidden due to the fact that 'no_log: true' was specified for this result")) - - # All result keys stating with _ansible_ are internal, so remove them from the result before we output anything. - abridged_result = strip_internal_keys(module_response_deepcopy(result)) - - # remove invocation unless specifically wanting it - if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result: - del abridged_result['invocation'] - - # remove diff information from screen output - if self._display.verbosity < 3 and 'diff' in result: - del abridged_result['diff'] - - # remove exception from screen output - if 'exception' in abridged_result: - del abridged_result['exception'] - - dumped = '' - - # put changed and skipped into a header line - if 'changed' in abridged_result: - dumped += 'changed=' + str(abridged_result['changed']).lower() + ' ' - del abridged_result['changed'] - - if 'skipped' in abridged_result: - dumped += 'skipped=' + str(abridged_result['skipped']).lower() + ' ' - del abridged_result['skipped'] - - # if we already have stdout, we don't need stdout_lines - if 'stdout' in abridged_result and 'stdout_lines' in abridged_result: - abridged_result['stdout_lines'] = '' - - # if we already have stderr, we don't need stderr_lines - if 'stderr' in abridged_result and 'stderr_lines' in abridged_result: - abridged_result['stderr_lines'] = '' - - if abridged_result: - dumped += '\n' - dumped += to_text(yaml.dump(abridged_result, allow_unicode=True, width=1000, Dumper=MyDumper, default_flow_style=False)) - - # indent by a couple of spaces - dumped = '\n '.join(dumped.split('\n')).rstrip() - return dumped 
- - def _serialize_diff(self, diff): - return to_text(yaml.dump(diff, allow_unicode=True, width=1000, Dumper=AnsibleDumper, default_flow_style=False)) diff --git a/plugins/connection/chroot.py b/plugins/connection/chroot.py index ef6d5566d3..35f7312326 100644 --- a/plugins/connection/chroot.py +++ b/plugins/connection/chroot.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Based on local.py (c) 2012, Michael DeHaan # # (c) 2013, Maykel Moya @@ -7,57 +6,80 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Maykel Moya (!UNKNOWN) - name: chroot - short_description: Interact with local chroot +DOCUMENTATION = r""" +author: Maykel Moya (!UNKNOWN) +name: chroot +short_description: Interact with local chroot +description: + - Run commands or put/fetch files to an existing chroot on the Ansible controller. +options: + remote_addr: description: - - Run commands or put/fetch files to an existing chroot on the Ansible controller. - options: - remote_addr: - description: - - The path of the chroot you want to access. - default: inventory_hostname - vars: - - name: inventory_hostname - - name: ansible_host - executable: - description: - - User specified executable shell - ini: - - section: defaults - key: executable - env: - - name: ANSIBLE_EXECUTABLE - vars: - - name: ansible_executable - default: /bin/sh - chroot_exe: - description: - - User specified chroot binary - ini: - - section: chroot_connection - key: exe - env: - - name: ANSIBLE_CHROOT_EXE - vars: - - name: ansible_chroot_exe - default: chroot -''' + - The path of the chroot you want to access. 
+ type: string + default: inventory_hostname + vars: + - name: inventory_hostname + - name: ansible_host + executable: + description: + - User specified executable shell. + type: string + ini: + - section: defaults + key: executable + env: + - name: ANSIBLE_EXECUTABLE + vars: + - name: ansible_executable + default: /bin/sh + chroot_exe: + description: + - User specified chroot binary. + type: string + ini: + - section: chroot_connection + key: exe + env: + - name: ANSIBLE_CHROOT_EXE + vars: + - name: ansible_chroot_exe + default: chroot + disable_root_check: + description: + - Do not check that the user is not root. + ini: + - section: chroot_connection + key: disable_root_check + env: + - name: ANSIBLE_CHROOT_DISABLE_ROOT_CHECK + vars: + - name: ansible_chroot_disable_root_check + default: false + type: bool + version_added: 7.3.0 +""" + +EXAMPLES = r""" +- hosts: chroots + connection: community.general.chroot + tasks: + - debug: + msg: "This is coming from chroot environment" +""" import os import os.path import subprocess import traceback +from shlex import quote as shlex_quote from ansible.errors import AnsibleError from ansible.module_utils.basic import is_executable from ansible.module_utils.common.process import get_bin_path -from ansible.module_utils.six.moves import shlex_quote -from ansible.module_utils.common.text.converters import to_bytes, to_native +from ansible.module_utils.common.text.converters import to_bytes from ansible.plugins.connection import ConnectionBase, BUFSIZE from ansible.utils.display import Display @@ -81,31 +103,32 @@ class Connection(ConnectionBase): self.chroot = self._play_context.remote_addr - if os.geteuid() != 0: - raise AnsibleError("chroot connection requires running as root") - - # we're running as root on the local system so do some - # trivial checks for ensuring 'host' is actually a chroot'able dir + # do some trivial checks for ensuring 'host' is actually a chroot'able dir if not os.path.isdir(self.chroot): - raise 
AnsibleError("%s is not a directory" % self.chroot) + raise AnsibleError(f"{self.chroot} is not a directory") chrootsh = os.path.join(self.chroot, 'bin/sh') # Want to check for a usable bourne shell inside the chroot. # is_executable() == True is sufficient. For symlinks it # gets really complicated really fast. So we punt on finding that - # out. As long as it's a symlink we assume that it will work + # out. As long as it is a symlink we assume that it will work if not (is_executable(chrootsh) or (os.path.lexists(chrootsh) and os.path.islink(chrootsh))): - raise AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot) + raise AnsibleError(f"{self.chroot} does not look like a chrootable dir (/bin/sh missing)") def _connect(self): """ connect to the chroot """ + if not self.get_option('disable_root_check') and os.geteuid() != 0: + raise AnsibleError( + "chroot connection requires running as root. " + "You can override this check with the `disable_root_check` option.") + if os.path.isabs(self.get_option('chroot_exe')): self.chroot_cmd = self.get_option('chroot_exe') else: try: self.chroot_cmd = get_bin_path(self.get_option('chroot_exe')) except ValueError as e: - raise AnsibleError(to_native(e)) + raise AnsibleError(str(e)) super(Connection, self)._connect() if not self._connected: @@ -123,7 +146,7 @@ class Connection(ConnectionBase): executable = self.get_option('executable') local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd] - display.vvv("EXEC %s" % local_cmd, host=self.chroot) + display.vvv(f"EXEC {local_cmd}", host=self.chroot) local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -148,7 +171,7 @@ class Connection(ConnectionBase): exist in any given chroot. So for now we're choosing "/" instead. This also happens to be the former default. 
- Can revisit using $HOME instead if it's a problem + Can revisit using $HOME instead if it is a problem """ if not remote_path.startswith(os.path.sep): remote_path = os.path.join(os.path.sep, remote_path) @@ -157,7 +180,7 @@ class Connection(ConnectionBase): def put_file(self, in_path, out_path): """ transfer a file from local to chroot """ super(Connection, self).put_file(in_path, out_path) - display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot) + display.vvv(f"PUT {in_path} TO {out_path}", host=self.chroot) out_path = shlex_quote(self._prefix_login_path(out_path)) try: @@ -167,27 +190,27 @@ class Connection(ConnectionBase): else: count = '' try: - p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file) + p = self._buffered_exec_command(f'dd of={out_path} bs={BUFSIZE}{count}', stdin=in_file) except OSError: raise AnsibleError("chroot connection requires dd command in the chroot") try: stdout, stderr = p.communicate() except Exception: traceback.print_exc() - raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}") if p.returncode != 0: - raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{stdout}\n{stderr}") except IOError: - raise AnsibleError("file or module does not exist at: %s" % in_path) + raise AnsibleError(f"file or module does not exist at: {in_path}") def fetch_file(self, in_path, out_path): """ fetch a file from chroot to local """ super(Connection, self).fetch_file(in_path, out_path) - display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot) + display.vvv(f"FETCH {in_path} TO {out_path}", host=self.chroot) in_path = shlex_quote(self._prefix_login_path(in_path)) try: - p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE)) + p = 
self._buffered_exec_command(f'dd if={in_path} bs={BUFSIZE}') except OSError: raise AnsibleError("chroot connection requires dd command in the chroot") @@ -199,10 +222,10 @@ class Connection(ConnectionBase): chunk = p.stdout.read(BUFSIZE) except Exception: traceback.print_exc() - raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}") stdout, stderr = p.communicate() if p.returncode != 0: - raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{stdout}\n{stderr}") def close(self): """ terminate the connection; nothing to do here """ diff --git a/plugins/connection/funcd.py b/plugins/connection/funcd.py index 9f37f791de..86d050c1db 100644 --- a/plugins/connection/funcd.py +++ b/plugins/connection/funcd.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Based on local.py (c) 2012, Michael DeHaan # Based on chroot.py (c) 2013, Maykel Moya # Copyright (c) 2013, Michael Scherer @@ -6,26 +5,26 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Michael Scherer (@mscherer) - name: funcd - short_description: Use funcd to connect to target +DOCUMENTATION = r""" +author: Michael Scherer (@mscherer) +name: funcd +short_description: Use funcd to connect to target +description: + - This transport permits you to use Ansible over Func. + - For people who have already setup func and that wish to play with ansible, this permit to move gradually to ansible without + having to redo completely the setup of the network. +options: + remote_addr: description: - - This transport permits you to use Ansible over Func. 
- - For people who have already setup func and that wish to play with ansible, - this permit to move gradually to ansible without having to redo completely the setup of the network. - options: - remote_addr: - description: - - The path of the chroot you want to access. - default: inventory_hostname - vars: - - name: ansible_host - - name: ansible_func_host -''' + - The path of the chroot you want to access. + type: string + default: inventory_hostname + vars: + - name: ansible_host + - name: ansible_func_host +""" HAVE_FUNC = False try: @@ -70,8 +69,8 @@ class Connection(ConnectionBase): if in_data: raise AnsibleError("Internal Error: this module does not support optimized module pipelining") - # totally ignores privlege escalation - display.vvv("EXEC %s" % cmd, host=self.host) + # totally ignores privilege escalation + display.vvv(f"EXEC {cmd}", host=self.host) p = self.client.command.run(cmd)[self.host] return p[0], p[1], p[2] @@ -86,14 +85,14 @@ class Connection(ConnectionBase): """ transfer a file from local to remote """ out_path = self._normalize_path(out_path, '/') - display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) + display.vvv(f"PUT {in_path} TO {out_path}", host=self.host) self.client.local.copyfile.send(in_path, out_path) def fetch_file(self, in_path, out_path): """ fetch a file from remote to local """ in_path = self._normalize_path(in_path, '/') - display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) + display.vvv(f"FETCH {in_path} TO {out_path}", host=self.host) # need to use a tmp dir due to difference of semantic for getfile # ( who take a # directory as destination) and fetch_file, who # take a file directly diff --git a/plugins/connection/incus.py b/plugins/connection/incus.py new file mode 100644 index 0000000000..3dfd37764b --- /dev/null +++ b/plugins/connection/incus.py @@ -0,0 +1,274 @@ +# Based on lxd.py (c) 2016, Matt Clay +# (c) 2023, Stephane Graber +# Copyright (c) 2023 Ansible Project +# GNU General Public 
License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +author: Stéphane Graber (@stgraber) +name: incus +short_description: Run tasks in Incus instances using the Incus CLI +description: + - Run commands or put/fetch files to an existing Incus instance using Incus CLI. +version_added: "8.2.0" +options: + remote_addr: + description: + - The instance identifier. + type: string + default: inventory_hostname + vars: + - name: inventory_hostname + - name: ansible_host + - name: ansible_incus_host + executable: + description: + - The shell to use for execution inside the instance. + type: string + default: /bin/sh + vars: + - name: ansible_executable + - name: ansible_incus_executable + incus_become_method: + description: + - Become command used to switch to a non-root user. + - Is only used when O(remote_user) is not V(root). + type: str + default: /bin/su + vars: + - name: incus_become_method + version_added: 10.4.0 + remote: + description: + - The name of the Incus remote to use (per C(incus remote list)). + - Remotes are used to access multiple servers from a single client. + type: string + default: local + vars: + - name: ansible_incus_remote + remote_user: + description: + - User to login/authenticate as. + - Can be set from the CLI with the C(--user) or C(-u) options. + type: string + default: root + vars: + - name: ansible_user + env: + - name: ANSIBLE_REMOTE_USER + ini: + - section: defaults + key: remote_user + keyword: + - name: remote_user + version_added: 10.4.0 + project: + description: + - The name of the Incus project to use (per C(incus project list)). + - Projects are used to divide the instances running on a server. 
+ type: string + default: default + vars: + - name: ansible_incus_project +""" + +import os +from subprocess import call, Popen, PIPE + +from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound +from ansible.module_utils.common.process import get_bin_path +from ansible.module_utils.common.text.converters import to_bytes, to_text +from ansible.plugins.connection import ConnectionBase + + +class Connection(ConnectionBase): + """ Incus based connections """ + + transport = "incus" + has_pipelining = True + + def __init__(self, play_context, new_stdin, *args, **kwargs): + super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) + + self._incus_cmd = get_bin_path("incus") + + if not self._incus_cmd: + raise AnsibleError("incus command not found in PATH") + + def _connect(self): + """connect to Incus (nothing to do here) """ + super(Connection, self)._connect() + + if not self._connected: + self._display.vvv(f"ESTABLISH Incus CONNECTION FOR USER: {self.get_option('remote_user')}", + host=self._instance()) + self._connected = True + + def _build_command(self, cmd) -> str: + """build the command to execute on the incus host""" + + exec_cmd = [ + self._incus_cmd, + "--project", self.get_option("project"), + "exec", + f"{self.get_option('remote')}:{self._instance()}", + "--"] + + if self.get_option("remote_user") != "root": + self._display.vvv( + f"INFO: Running as non-root user: {self.get_option('remote_user')}, \ + trying to run 'incus exec' with become method: {self.get_option('incus_become_method')}", + host=self._instance(), + ) + exec_cmd.extend( + [self.get_option("incus_become_method"), self.get_option("remote_user"), "-c"] + ) + + exec_cmd.extend([self.get_option("executable"), "-c", cmd]) + + return exec_cmd + + def _instance(self): + # Return only the leading part of the FQDN as the instance name + # as Incus instance names cannot be a FQDN. 
+ return self.get_option('remote_addr').split(".")[0] + + def exec_command(self, cmd, in_data=None, sudoable=True): + """ execute a command on the Incus host """ + super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) + + self._display.vvv(f"EXEC {cmd}", + host=self._instance()) + + local_cmd = self._build_command(cmd) + self._display.vvvvv(f"EXEC {local_cmd}", host=self._instance()) + + local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] + in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru') + + process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE) + stdout, stderr = process.communicate(in_data) + + stdout = to_text(stdout) + stderr = to_text(stderr) + + if stderr.startswith("Error: ") and stderr.rstrip().endswith( + ": Instance is not running" + ): + raise AnsibleConnectionFailure( + f"instance not running: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})" + ) + + if stderr.startswith("Error: ") and stderr.rstrip().endswith( + ": Instance not found" + ): + raise AnsibleConnectionFailure( + f"instance not found: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})" + ) + + if ( + stderr.startswith("Error: ") + and ": User does not have permission " in stderr + ): + raise AnsibleConnectionFailure( + f"instance access denied: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})" + ) + + if ( + stderr.startswith("Error: ") + and ": User does not have entitlement " in stderr + ): + raise AnsibleConnectionFailure( + f"instance access denied: {self._instance()} (remote={self.get_option('remote')}, project={self.get_option('project')})" + ) + + return process.returncode, stdout, stderr + + def _get_remote_uid_gid(self) -> tuple[int, int]: + """Get the user and group ID of 'remote_user' from the instance.""" + + rc, uid_out, err = self.exec_command("/bin/id -u") + 
if rc != 0: + raise AnsibleError( + f"Failed to get remote uid for user {self.get_option('remote_user')}: {err}" + ) + uid = uid_out.strip() + + rc, gid_out, err = self.exec_command("/bin/id -g") + if rc != 0: + raise AnsibleError( + f"Failed to get remote gid for user {self.get_option('remote_user')}: {err}" + ) + gid = gid_out.strip() + + return int(uid), int(gid) + + def put_file(self, in_path, out_path): + """ put a file from local to Incus """ + super(Connection, self).put_file(in_path, out_path) + + self._display.vvv(f"PUT {in_path} TO {out_path}", + host=self._instance()) + + if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')): + raise AnsibleFileNotFound(f"input path is not a file: {in_path}") + + if self.get_option("remote_user") != "root": + uid, gid = self._get_remote_uid_gid() + local_cmd = [ + self._incus_cmd, + "--project", + self.get_option("project"), + "file", + "push", + "--uid", + str(uid), + "--gid", + str(gid), + "--quiet", + in_path, + f"{self.get_option('remote')}:{self._instance()}/{out_path}", + ] + else: + local_cmd = [ + self._incus_cmd, + "--project", + self.get_option("project"), + "file", + "push", + "--quiet", + in_path, + f"{self.get_option('remote')}:{self._instance()}/{out_path}", + ] + + self._display.vvvvv(f"PUT {local_cmd}", host=self._instance()) + + local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] + + call(local_cmd) + + def fetch_file(self, in_path, out_path): + """ fetch a file from Incus to local """ + super(Connection, self).fetch_file(in_path, out_path) + + self._display.vvv(f"FETCH {in_path} TO {out_path}", + host=self._instance()) + + local_cmd = [ + self._incus_cmd, + "--project", self.get_option("project"), + "file", "pull", "--quiet", + f"{self.get_option('remote')}:{self._instance()}/{in_path}", + out_path] + + local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] + + call(local_cmd) + + def close(self): + """ close the connection (nothing to do here) 
""" + super(Connection, self).close() + + self._connected = False diff --git a/plugins/connection/iocage.py b/plugins/connection/iocage.py index 2e2a6f0937..fa4973bae1 100644 --- a/plugins/connection/iocage.py +++ b/plugins/connection/iocage.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Based on jail.py # (c) 2013, Michael Scherer # (c) 2015, Toshio Kuratomi @@ -7,29 +6,30 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Stephan Lohse (!UNKNOWN) - name: iocage - short_description: Run tasks in iocage jails +DOCUMENTATION = r""" +author: Stephan Lohse (!UNKNOWN) +name: iocage +short_description: Run tasks in iocage jails +description: + - Run commands or put/fetch files to an existing iocage jail. +options: + remote_addr: description: - - Run commands or put/fetch files to an existing iocage jail - options: - remote_addr: - description: - - Path to the jail - vars: - - name: ansible_host - - name: ansible_iocage_host - remote_user: - description: - - User to execute as inside the jail - vars: - - name: ansible_user - - name: ansible_iocage_user -''' + - Path to the jail. + type: string + vars: + - name: ansible_host + - name: ansible_iocage_host + remote_user: + description: + - User to execute as inside the jail. 
+ type: string + vars: + - name: ansible_user + - name: ansible_iocage_user +""" import subprocess @@ -53,11 +53,12 @@ class Connection(Jail): jail_uuid = self.get_jail_uuid() - kwargs[Jail.modified_jailname_key] = 'ioc-{0}'.format(jail_uuid) + kwargs[Jail.modified_jailname_key] = f'ioc-{jail_uuid}' - display.vvv(u"Jail {iocjail} has been translated to {rawjail}".format( - iocjail=self.ioc_jail, rawjail=kwargs[Jail.modified_jailname_key]), - host=kwargs[Jail.modified_jailname_key]) + display.vvv( + f"Jail {self.ioc_jail} has been translated to {kwargs[Jail.modified_jailname_key]}", + host=kwargs[Jail.modified_jailname_key] + ) super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) @@ -79,6 +80,6 @@ class Connection(Jail): p.wait() if p.returncode != 0: - raise AnsibleError(u"iocage returned an error: {0}".format(stdout)) + raise AnsibleError(f"iocage returned an error: {stdout}") return stdout.strip('\n') diff --git a/plugins/connection/jail.py b/plugins/connection/jail.py index d813780136..7f25c3fe01 100644 --- a/plugins/connection/jail.py +++ b/plugins/connection/jail.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Based on local.py by Michael DeHaan # and chroot.py by Maykel Moya # Copyright (c) 2013, Michael Scherer @@ -7,38 +6,40 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Ansible Core Team - name: jail - short_description: Run tasks in jails +DOCUMENTATION = r""" +author: Ansible Core Team +name: jail +short_description: Run tasks in jails +description: + - Run commands or put/fetch files to an existing jail. 
+options: + remote_addr: description: - - Run commands or put/fetch files to an existing jail - options: - remote_addr: - description: - - Path to the jail - default: inventory_hostname - vars: - - name: ansible_host - - name: ansible_jail_host - remote_user: - description: - - User to execute as inside the jail - vars: - - name: ansible_user - - name: ansible_jail_user -''' + - Path to the jail. + type: string + default: inventory_hostname + vars: + - name: inventory_hostname + - name: ansible_host + - name: ansible_jail_host + remote_user: + description: + - User to execute as inside the jail. + type: string + vars: + - name: ansible_user + - name: ansible_jail_user +""" import os import os.path import subprocess import traceback +from shlex import quote as shlex_quote from ansible.errors import AnsibleError -from ansible.module_utils.six.moves import shlex_quote from ansible.module_utils.common.process import get_bin_path from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text from ansible.plugins.connection import ConnectionBase, BUFSIZE @@ -72,14 +73,14 @@ class Connection(ConnectionBase): self.jexec_cmd = self._search_executable('jexec') if self.jail not in self.list_jails(): - raise AnsibleError("incorrect jail name %s" % self.jail) + raise AnsibleError(f"incorrect jail name {self.jail}") @staticmethod def _search_executable(executable): try: return get_bin_path(executable) except ValueError: - raise AnsibleError("%s command not found in PATH" % executable) + raise AnsibleError(f"{executable} command not found in PATH") def list_jails(self): p = subprocess.Popen([self.jls_cmd, '-q', 'name'], @@ -94,7 +95,7 @@ class Connection(ConnectionBase): """ connect to the jail; nothing to do here """ super(Connection, self)._connect() if not self._connected: - display.vvv(u"ESTABLISH JAIL CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self.jail) + display.vvv(f"ESTABLISH JAIL CONNECTION FOR USER: 
{self._play_context.remote_user}", host=self.jail) self._connected = True def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE): @@ -112,11 +113,11 @@ class Connection(ConnectionBase): if self._play_context.remote_user is not None: local_cmd += ['-U', self._play_context.remote_user] # update HOME since -U does not update the jail environment - set_env = 'HOME=~' + self._play_context.remote_user + ' ' + set_env = f"HOME=~{self._play_context.remote_user} " local_cmd += [self.jail, self._play_context.executable, '-c', set_env + cmd] - display.vvv("EXEC %s" % (local_cmd,), host=self.jail) + display.vvv(f"EXEC {local_cmd}", host=self.jail) local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -141,7 +142,7 @@ class Connection(ConnectionBase): exist in any given chroot. So for now we're choosing "/" instead. This also happens to be the former default. - Can revisit using $HOME instead if it's a problem + Can revisit using $HOME instead if it is a problem """ if not remote_path.startswith(os.path.sep): remote_path = os.path.join(os.path.sep, remote_path) @@ -150,7 +151,7 @@ class Connection(ConnectionBase): def put_file(self, in_path, out_path): """ transfer a file from local to jail """ super(Connection, self).put_file(in_path, out_path) - display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail) + display.vvv(f"PUT {in_path} TO {out_path}", host=self.jail) out_path = shlex_quote(self._prefix_login_path(out_path)) try: @@ -160,27 +161,27 @@ class Connection(ConnectionBase): else: count = '' try: - p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file) + p = self._buffered_exec_command(f'dd of={out_path} bs={BUFSIZE}{count}', stdin=in_file) except OSError: raise AnsibleError("jail connection requires dd command in the jail") try: stdout, stderr = p.communicate() except Exception: 
traceback.print_exc() - raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}") if p.returncode != 0: - raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, to_native(stdout), to_native(stderr))) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{to_native(stdout)}\n{to_native(stderr)}") except IOError: - raise AnsibleError("file or module does not exist at: %s" % in_path) + raise AnsibleError(f"file or module does not exist at: {in_path}") def fetch_file(self, in_path, out_path): """ fetch a file from jail to local """ super(Connection, self).fetch_file(in_path, out_path) - display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail) + display.vvv(f"FETCH {in_path} TO {out_path}", host=self.jail) in_path = shlex_quote(self._prefix_login_path(in_path)) try: - p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE)) + p = self._buffered_exec_command(f'dd if={in_path} bs={BUFSIZE}') except OSError: raise AnsibleError("jail connection requires dd command in the jail") @@ -192,10 +193,10 @@ class Connection(ConnectionBase): chunk = p.stdout.read(BUFSIZE) except Exception: traceback.print_exc() - raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}") stdout, stderr = p.communicate() if p.returncode != 0: - raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, to_native(stdout), to_native(stderr))) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{to_native(stdout)}\n{to_native(stderr)}") def close(self): """ terminate the connection; nothing to do here """ diff --git a/plugins/connection/lxc.py b/plugins/connection/lxc.py index adf3eec1c1..e8e28ed804 100644 --- a/plugins/connection/lxc.py +++ b/plugins/connection/lxc.py @@ -1,34 +1,35 @@ -# -*- coding: 
utf-8 -*- # (c) 2015, Joerg Thalheim # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Joerg Thalheim (!UNKNOWN) - name: lxc - short_description: Run tasks in lxc containers via lxc python library +DOCUMENTATION = r""" +author: Joerg Thalheim (!UNKNOWN) +name: lxc +short_description: Run tasks in LXC containers using lxc python library +description: + - Run commands or put/fetch files to an existing LXC container using lxc python library. +options: + remote_addr: description: - - Run commands or put/fetch files to an existing lxc container using lxc python library - options: - remote_addr: - description: - - Container identifier - default: inventory_hostname - vars: - - name: ansible_host - - name: ansible_lxc_host - executable: - default: /bin/sh - description: - - Shell executable - vars: - - name: ansible_executable - - name: ansible_lxc_executable -''' + - Container identifier. + type: string + default: inventory_hostname + vars: + - name: inventory_hostname + - name: ansible_host + - name: ansible_lxc_host + executable: + default: /bin/sh + description: + - Shell executable. 
+ type: string + vars: + - name: ansible_executable + - name: ansible_lxc_executable +""" import os import shutil @@ -59,7 +60,7 @@ class Connection(ConnectionBase): def __init__(self, play_context, new_stdin, *args, **kwargs): super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) - self.container_name = self._play_context.remote_addr + self.container_name = None self.container = None def _connect(self): @@ -67,16 +68,19 @@ class Connection(ConnectionBase): super(Connection, self)._connect() if not HAS_LIBLXC: - msg = "lxc bindings for python2 are not installed" + msg = "lxc python bindings are not installed" raise errors.AnsibleError(msg) - if self.container: + container_name = self.get_option('remote_addr') + if self.container and self.container_name == container_name: return + self.container_name = container_name + self._display.vvv("THIS IS A LOCAL LXC DIR", host=self.container_name) self.container = _lxc.Container(self.container_name) if self.container.state == "STOPPED": - raise errors.AnsibleError("%s is not running" % self.container_name) + raise errors.AnsibleError(f"{self.container_name} is not running") @staticmethod def _communicate(pid, in_data, stdin, stdout, stderr): @@ -117,7 +121,7 @@ class Connection(ConnectionBase): super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) # python2-lxc needs bytes. python3-lxc needs text. 
- executable = to_native(self._play_context.executable, errors='surrogate_or_strict') + executable = to_native(self.get_option('executable'), errors='surrogate_or_strict') local_cmd = [executable, '-c', to_native(cmd, errors='surrogate_or_strict')] read_stdout, write_stdout = None, None @@ -138,10 +142,10 @@ class Connection(ConnectionBase): read_stdin, write_stdin = os.pipe() kwargs['stdin'] = self._set_nonblocking(read_stdin) - self._display.vvv("EXEC %s" % (local_cmd), host=self.container_name) + self._display.vvv(f"EXEC {local_cmd}", host=self.container_name) pid = self.container.attach(_lxc.attach_run_command, local_cmd, **kwargs) if pid == -1: - msg = "failed to attach to container %s" % self.container_name + msg = f"failed to attach to container {self.container_name}" raise errors.AnsibleError(msg) write_stdout = os.close(write_stdout) @@ -168,18 +172,18 @@ class Connection(ConnectionBase): def put_file(self, in_path, out_path): ''' transfer a file from local to lxc ''' super(Connection, self).put_file(in_path, out_path) - self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.container_name) + self._display.vvv(f"PUT {in_path} TO {out_path}", host=self.container_name) in_path = to_bytes(in_path, errors='surrogate_or_strict') out_path = to_bytes(out_path, errors='surrogate_or_strict') if not os.path.exists(in_path): - msg = "file or module does not exist: %s" % in_path + msg = f"file or module does not exist: {in_path}" raise errors.AnsibleFileNotFound(msg) try: src_file = open(in_path, "rb") except IOError: traceback.print_exc() - raise errors.AnsibleError("failed to open input file to %s" % in_path) + raise errors.AnsibleError(f"failed to open input file to {in_path}") try: def write_file(args): with open(out_path, 'wb+') as dst_file: @@ -188,7 +192,7 @@ class Connection(ConnectionBase): self.container.attach_wait(write_file, None) except IOError: traceback.print_exc() - msg = "failed to transfer file to %s" % out_path + msg = f"failed to 
transfer file to {out_path}" raise errors.AnsibleError(msg) finally: src_file.close() @@ -196,7 +200,7 @@ class Connection(ConnectionBase): def fetch_file(self, in_path, out_path): ''' fetch a file from lxc to local ''' super(Connection, self).fetch_file(in_path, out_path) - self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.container_name) + self._display.vvv(f"FETCH {in_path} TO {out_path}", host=self.container_name) in_path = to_bytes(in_path, errors='surrogate_or_strict') out_path = to_bytes(out_path, errors='surrogate_or_strict') @@ -204,7 +208,7 @@ class Connection(ConnectionBase): dst_file = open(out_path, "wb") except IOError: traceback.print_exc() - msg = "failed to open output file %s" % out_path + msg = f"failed to open output file {out_path}" raise errors.AnsibleError(msg) try: def write_file(args): @@ -219,7 +223,7 @@ class Connection(ConnectionBase): self.container.attach_wait(write_file, None) except IOError: traceback.print_exc() - msg = "failed to transfer file from %s to %s" % (in_path, out_path) + msg = f"failed to transfer file from {in_path} to {out_path}" raise errors.AnsibleError(msg) finally: dst_file.close() diff --git a/plugins/connection/lxd.py b/plugins/connection/lxd.py index affb87dfd0..d4d3b45d0a 100644 --- a/plugins/connection/lxd.py +++ b/plugins/connection/lxd.py @@ -1,48 +1,77 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2016 Matt Clay # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Matt Clay (@mattclay) - name: lxd - short_description: Run tasks in lxc containers via lxc CLI +DOCUMENTATION = r""" +author: Matt Clay (@mattclay) +name: lxd +short_description: Run tasks in LXD instances using C(lxc) CLI +description: 
+ - Run commands or put/fetch files to an existing instance using C(lxc) CLI. +options: + remote_addr: description: - - Run commands or put/fetch files to an existing lxc container using lxc CLI - options: - remote_addr: - description: - - Container identifier. - default: inventory_hostname - vars: - - name: inventory_hostname - - name: ansible_host - - name: ansible_lxd_host - executable: - description: - - shell to use for execution inside container - default: /bin/sh - vars: - - name: ansible_executable - - name: ansible_lxd_executable - remote: - description: - - Name of the LXD remote to use. - default: local - vars: - - name: ansible_lxd_remote - version_added: 2.0.0 - project: - description: - - Name of the LXD project to use. - vars: - - name: ansible_lxd_project - version_added: 2.0.0 -''' + - Instance (container/VM) identifier. + - Since community.general 8.0.0, a FQDN can be provided; in that case, the first component (the part before C(.)) is + used as the instance identifier. + type: string + default: inventory_hostname + vars: + - name: inventory_hostname + - name: ansible_host + - name: ansible_lxd_host + executable: + description: + - Shell to use for execution inside instance. + type: string + default: /bin/sh + vars: + - name: ansible_executable + - name: ansible_lxd_executable + lxd_become_method: + description: + - Become command used to switch to a non-root user. + - Is only used when O(remote_user) is not V(root). + type: str + default: /bin/su + vars: + - name: lxd_become_method + version_added: 10.4.0 + remote: + description: + - Name of the LXD remote to use. + type: string + default: local + vars: + - name: ansible_lxd_remote + version_added: 2.0.0 + remote_user: + description: + - User to login/authenticate as. + - Can be set from the CLI with the C(--user) or C(-u) options. 
+ type: string + default: root + vars: + - name: ansible_user + env: + - name: ANSIBLE_REMOTE_USER + ini: + - section: defaults + key: remote_user + keyword: + - name: remote_user + version_added: 10.4.0 + project: + description: + - Name of the LXD project to use. + type: string + vars: + - name: ansible_lxd_project + version_added: 2.0.0 +""" import os from subprocess import Popen, PIPE @@ -58,7 +87,6 @@ class Connection(ConnectionBase): transport = 'community.general.lxd' has_pipelining = True - default_user = 'root' def __init__(self, play_context, new_stdin, *args, **kwargs): super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) @@ -68,32 +96,50 @@ class Connection(ConnectionBase): except ValueError: raise AnsibleError("lxc command not found in PATH") - if self._play_context.remote_user is not None and self._play_context.remote_user != 'root': - self._display.warning('lxd does not support remote_user, using container default: root') + def _host(self): + """ translate remote_addr to lxd (short) hostname """ + return self.get_option("remote_addr").split(".", 1)[0] def _connect(self): """connect to lxd (nothing to do here) """ super(Connection, self)._connect() if not self._connected: - self._display.vvv(u"ESTABLISH LXD CONNECTION FOR USER: root", host=self.get_option('remote_addr')) + self._display.vvv(f"ESTABLISH LXD CONNECTION FOR USER: {self.get_option('remote_user')}", host=self._host()) self._connected = True + def _build_command(self, cmd) -> str: + """build the command to execute on the lxd host""" + + exec_cmd = [self._lxc_cmd] + + if self.get_option("project"): + exec_cmd.extend(["--project", self.get_option("project")]) + + exec_cmd.extend(["exec", f"{self.get_option('remote')}:{self._host()}", "--"]) + + if self.get_option("remote_user") != "root": + self._display.vvv( + f"INFO: Running as non-root user: {self.get_option('remote_user')}, \ + trying to run 'lxc exec' with become method: {self.get_option('lxd_become_method')}", + 
host=self._host(), + ) + exec_cmd.extend( + [self.get_option("lxd_become_method"), self.get_option("remote_user"), "-c"] + ) + + exec_cmd.extend([self.get_option("executable"), "-c", cmd]) + + return exec_cmd + def exec_command(self, cmd, in_data=None, sudoable=True): """ execute a command on the lxd host """ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) - self._display.vvv(u"EXEC {0}".format(cmd), host=self.get_option('remote_addr')) + self._display.vvv(f"EXEC {cmd}", host=self._host()) - local_cmd = [self._lxc_cmd] - if self.get_option("project"): - local_cmd.extend(["--project", self.get_option("project")]) - local_cmd.extend([ - "exec", - "%s:%s" % (self.get_option("remote"), self.get_option("remote_addr")), - "--", - self.get_option("executable"), "-c", cmd - ]) + local_cmd = self._build_command(cmd) + self._display.vvvvv(f"EXEC {local_cmd}", host=self._host()) local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru') @@ -104,31 +150,73 @@ class Connection(ConnectionBase): stdout = to_text(stdout) stderr = to_text(stderr) - if stderr == "error: Container is not running.\n": - raise AnsibleConnectionFailure("container not running: %s" % self.get_option('remote_addr')) + self._display.vvvvv(f"EXEC lxc output: {stdout} {stderr}", host=self._host()) - if stderr == "error: not found\n": - raise AnsibleConnectionFailure("container not found: %s" % self.get_option('remote_addr')) + if "is not running" in stderr: + raise AnsibleConnectionFailure(f"instance not running: {self._host()}") + + if stderr.strip() == "Error: Instance not found" or stderr.strip() == "error: not found": + raise AnsibleConnectionFailure(f"instance not found: {self._host()}") return process.returncode, stdout, stderr + def _get_remote_uid_gid(self) -> tuple[int, int]: + """Get the user and group ID of 'remote_user' from the instance.""" + + rc, uid_out, err = 
self.exec_command("/bin/id -u") + if rc != 0: + raise AnsibleError( + f"Failed to get remote uid for user {self.get_option('remote_user')}: {err}" + ) + uid = uid_out.strip() + + rc, gid_out, err = self.exec_command("/bin/id -g") + if rc != 0: + raise AnsibleError( + f"Failed to get remote gid for user {self.get_option('remote_user')}: {err}" + ) + gid = gid_out.strip() + + return int(uid), int(gid) + def put_file(self, in_path, out_path): """ put a file from local to lxd """ super(Connection, self).put_file(in_path, out_path) - self._display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self.get_option('remote_addr')) + self._display.vvv(f"PUT {in_path} TO {out_path}", host=self._host()) if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')): - raise AnsibleFileNotFound("input path is not a file: %s" % in_path) + raise AnsibleFileNotFound(f"input path is not a file: {in_path}") local_cmd = [self._lxc_cmd] if self.get_option("project"): local_cmd.extend(["--project", self.get_option("project")]) - local_cmd.extend([ - "file", "push", - in_path, - "%s:%s/%s" % (self.get_option("remote"), self.get_option("remote_addr"), out_path) - ]) + + if self.get_option("remote_user") != "root": + uid, gid = self._get_remote_uid_gid() + local_cmd.extend( + [ + "file", + "push", + "--uid", + str(uid), + "--gid", + str(gid), + in_path, + f"{self.get_option('remote')}:{self._host()}/{out_path}", + ] + ) + else: + local_cmd.extend( + [ + "file", + "push", + in_path, + f"{self.get_option('remote')}:{self._host()}/{out_path}", + ] + ) + + self._display.vvvvv(f"PUT {local_cmd}", host=self._host()) local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd] @@ -139,14 +227,14 @@ class Connection(ConnectionBase): """ fetch a file from lxd to local """ super(Connection, self).fetch_file(in_path, out_path) - self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self.get_option('remote_addr')) + self._display.vvv(f"FETCH {in_path} TO 
{out_path}", host=self._host()) local_cmd = [self._lxc_cmd] if self.get_option("project"): local_cmd.extend(["--project", self.get_option("project")]) local_cmd.extend([ "file", "pull", - "%s:%s/%s" % (self.get_option("remote"), self.get_option("remote_addr"), in_path), + f"{self.get_option('remote')}:{self._host()}/{in_path}", out_path ]) diff --git a/plugins/connection/qubes.py b/plugins/connection/qubes.py index 25594e952b..8d69594b22 100644 --- a/plugins/connection/qubes.py +++ b/plugins/connection/qubes.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Based on the buildah connection plugin # Copyright (c) 2017 Ansible Project # 2018 Kushal Das @@ -8,36 +7,36 @@ # # Written by: Kushal Das (https://github.com/kushaldas) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: qubes - short_description: Interact with an existing QubesOS AppVM +DOCUMENTATION = r""" +name: qubes +short_description: Interact with an existing QubesOS AppVM +description: + - Run commands or put/fetch files to an existing Qubes AppVM using qubes tools. +author: Kushal Das (@kushaldas) + + +options: + remote_addr: description: - - Run commands or put/fetch files to an existing Qubes AppVM using qubes tools. - - author: Kushal Das (@kushaldas) - - - options: - remote_addr: - description: - - vm name - default: inventory_hostname - vars: - - name: ansible_host - remote_user: - description: - - The user to execute as inside the vm. - default: The *user* account as default in Qubes OS. - vars: - - name: ansible_user + - VM name. + type: string + default: inventory_hostname + vars: + - name: ansible_host + remote_user: + description: + - The user to execute as inside the VM. + type: string + default: The I(user) account as default in Qubes OS. 
+ vars: + - name: ansible_user # keyword: # - name: hosts -''' +""" import subprocess @@ -76,7 +75,7 @@ class Connection(ConnectionBase): """ display.vvvv("CMD: ", cmd) if not cmd.endswith("\n"): - cmd = cmd + "\n" + cmd = f"{cmd}\n" local_cmd = [] # For dom0 @@ -93,7 +92,7 @@ class Connection(ConnectionBase): display.vvvv("Local cmd: ", local_cmd) - display.vvv("RUN %s" % (local_cmd,), host=self._remote_vmname) + display.vvv(f"RUN {local_cmd}", host=self._remote_vmname) p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -112,42 +111,42 @@ class Connection(ConnectionBase): """Run specified command in a running QubesVM """ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) - display.vvvv("CMD IS: %s" % cmd) + display.vvvv(f"CMD IS: {cmd}") rc, stdout, stderr = self._qubes(cmd) - display.vvvvv("STDOUT %r STDERR %r" % (stderr, stderr)) + display.vvvvv(f"STDOUT {stdout!r} STDERR {stderr!r}") return rc, stdout, stderr def put_file(self, in_path, out_path): """ Place a local file located in 'in_path' inside VM at 'out_path' """ super(Connection, self).put_file(in_path, out_path) - display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._remote_vmname) + display.vvv(f"PUT {in_path} TO {out_path}", host=self._remote_vmname) with open(in_path, "rb") as fobj: source_data = fobj.read() - retcode, dummy, dummy = self._qubes('cat > "{0}"\n'.format(out_path), source_data, "qubes.VMRootShell") + retcode, dummy, dummy = self._qubes(f'cat > "{out_path}\"\n', source_data, "qubes.VMRootShell") # if qubes.VMRootShell service not supported, fallback to qubes.VMShell and # hope it will have appropriate permissions if retcode == 127: - retcode, dummy, dummy = self._qubes('cat > "{0}"\n'.format(out_path), source_data) + retcode, dummy, dummy = self._qubes(f'cat > "{out_path}\"\n', source_data) if retcode != 0: - raise AnsibleConnectionFailure('Failed to put_file to {0}'.format(out_path)) + 
raise AnsibleConnectionFailure(f'Failed to put_file to {out_path}') def fetch_file(self, in_path, out_path): """Obtain file specified via 'in_path' from the container and place it at 'out_path' """ super(Connection, self).fetch_file(in_path, out_path) - display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._remote_vmname) + display.vvv(f"FETCH {in_path} TO {out_path}", host=self._remote_vmname) # We are running in dom0 - cmd_args_list = ["qvm-run", "--pass-io", self._remote_vmname, "cat {0}".format(in_path)] + cmd_args_list = ["qvm-run", "--pass-io", self._remote_vmname, f"cat {in_path}"] with open(out_path, "wb") as fobj: p = subprocess.Popen(cmd_args_list, shell=False, stdout=fobj) p.communicate() if p.returncode != 0: - raise AnsibleConnectionFailure('Failed to fetch file to {0}'.format(out_path)) + raise AnsibleConnectionFailure(f'Failed to fetch file to {out_path}') def close(self): """ Closing the connection """ diff --git a/plugins/connection/saltstack.py b/plugins/connection/saltstack.py index 1dbc7296c7..b09ffcd787 100644 --- a/plugins/connection/saltstack.py +++ b/plugins/connection/saltstack.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Based on local.py (c) 2012, Michael DeHaan # Based on chroot.py (c) 2013, Maykel Moya # Based on func.py @@ -7,16 +6,15 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Michael Scherer (@mscherer) - name: saltstack - short_description: Allow ansible to piggyback on salt minions - description: - - This allows you to use existing Saltstack infrastructure to connect to targets. 
-''' +DOCUMENTATION = r""" +author: Michael Scherer (@mscherer) +name: saltstack +short_description: Allow ansible to piggyback on salt minions +description: + - This allows you to use existing Saltstack infrastructure to connect to targets. +""" import os import base64 @@ -59,11 +57,11 @@ class Connection(ConnectionBase): if in_data: raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining") - self._display.vvv("EXEC %s" % cmd, host=self.host) + self._display.vvv(f"EXEC {cmd}", host=self.host) # need to add 'true;' to work around https://github.com/saltstack/salt/issues/28077 - res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', 'true;' + cmd]) + res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', f"true;{cmd}"]) if self.host not in res: - raise errors.AnsibleError("Minion %s didn't answer, check if salt-minion is running and the name is correct" % self.host) + raise errors.AnsibleError(f"Minion {self.host} didn't answer, check if salt-minion is running and the name is correct") p = res[self.host] return p['retcode'], p['stdout'], p['stderr'] @@ -81,7 +79,7 @@ class Connection(ConnectionBase): super(Connection, self).put_file(in_path, out_path) out_path = self._normalize_path(out_path, '/') - self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host) + self._display.vvv(f"PUT {in_path} TO {out_path}", host=self.host) with open(in_path, 'rb') as in_fh: content = in_fh.read() self.client.cmd(self.host, 'hashutil.base64_decodefile', [base64.b64encode(content), out_path]) @@ -93,7 +91,7 @@ class Connection(ConnectionBase): super(Connection, self).fetch_file(in_path, out_path) in_path = self._normalize_path(in_path, '/') - self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host) + self._display.vvv(f"FETCH {in_path} TO {out_path}", host=self.host) content = self.client.cmd(self.host, 'cp.get_file_str', [in_path])[self.host] open(out_path, 'wb').write(content) diff --git 
a/plugins/connection/wsl.py b/plugins/connection/wsl.py new file mode 100644 index 0000000000..3b768eebf8 --- /dev/null +++ b/plugins/connection/wsl.py @@ -0,0 +1,790 @@ +# Derived from ansible/plugins/connection/proxmox_pct_remote.py (c) 2024 Nils Stein (@mietzen) +# Derived from ansible/plugins/connection/paramiko_ssh.py (c) 2012, Michael DeHaan +# Copyright (c) 2025 Rui Lopes (@rgl) +# Copyright (c) 2025 Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +author: Rui Lopes (@rgl) +name: wsl +short_description: Run tasks in WSL distribution using wsl.exe CLI using SSH +requirements: + - paramiko +description: + - Run commands or put/fetch files to an existing WSL distribution using wsl.exe CLI using SSH. + - Uses the Python SSH implementation (Paramiko) to connect to the WSL host. +version_added: "10.6.0" +options: + remote_addr: + description: + - Address of the remote target. + default: inventory_hostname + type: string + vars: + - name: inventory_hostname + - name: ansible_host + - name: ansible_ssh_host + - name: ansible_paramiko_host + port: + description: Remote port to connect to. + type: int + default: 22 + ini: + - section: defaults + key: remote_port + - section: paramiko_connection + key: remote_port + env: + - name: ANSIBLE_REMOTE_PORT + - name: ANSIBLE_REMOTE_PARAMIKO_PORT + vars: + - name: ansible_port + - name: ansible_ssh_port + - name: ansible_paramiko_port + keyword: + - name: port + remote_user: + description: + - User to login/authenticate as. + - Can be set from the CLI with the C(--user) or C(-u) options. 
+ type: string + vars: + - name: ansible_user + - name: ansible_ssh_user + - name: ansible_paramiko_user + env: + - name: ANSIBLE_REMOTE_USER + - name: ANSIBLE_PARAMIKO_REMOTE_USER + ini: + - section: defaults + key: remote_user + - section: paramiko_connection + key: remote_user + keyword: + - name: remote_user + password: + description: + - Secret used to either login the SSH server or as a passphrase for SSH keys that require it. + - Can be set from the CLI with the C(--ask-pass) option. + type: string + vars: + - name: ansible_password + - name: ansible_ssh_pass + - name: ansible_ssh_password + - name: ansible_paramiko_pass + - name: ansible_paramiko_password + use_rsa_sha2_algorithms: + description: + - Whether or not to enable RSA SHA2 algorithms for pubkeys and hostkeys. + - On paramiko versions older than 2.9, this only affects hostkeys. + - For behavior matching paramiko<2.9 set this to V(false). + vars: + - name: ansible_paramiko_use_rsa_sha2_algorithms + ini: + - {key: use_rsa_sha2_algorithms, section: paramiko_connection} + env: + - {name: ANSIBLE_PARAMIKO_USE_RSA_SHA2_ALGORITHMS} + default: true + type: boolean + host_key_auto_add: + description: "Automatically add host keys to C(~/.ssh/known_hosts)." + env: + - name: ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD + ini: + - key: host_key_auto_add + section: paramiko_connection + type: boolean + look_for_keys: + default: true + description: "Set to V(false) to disable searching for private key files in C(~/.ssh/)." + env: + - name: ANSIBLE_PARAMIKO_LOOK_FOR_KEYS + ini: + - {key: look_for_keys, section: paramiko_connection} + type: boolean + proxy_command: + default: "" + description: + - Proxy information for running the connection through a jumphost. + - This option is supported by paramiko version 1.9.0 or newer. 
+ type: string + env: + - name: ANSIBLE_PARAMIKO_PROXY_COMMAND + ini: + - {key: proxy_command, section: paramiko_connection} + vars: + - name: ansible_paramiko_proxy_command + record_host_keys: + default: true + description: "Save the host keys to a file." + env: + - name: ANSIBLE_PARAMIKO_RECORD_HOST_KEYS + ini: + - section: paramiko_connection + key: record_host_keys + type: boolean + host_key_checking: + description: "Set this to V(false) if you want to avoid host key checking by the underlying tools Ansible uses to connect + to the host." + type: boolean + default: true + env: + - name: ANSIBLE_HOST_KEY_CHECKING + - name: ANSIBLE_SSH_HOST_KEY_CHECKING + - name: ANSIBLE_PARAMIKO_HOST_KEY_CHECKING + ini: + - section: defaults + key: host_key_checking + - section: paramiko_connection + key: host_key_checking + vars: + - name: ansible_host_key_checking + - name: ansible_ssh_host_key_checking + - name: ansible_paramiko_host_key_checking + use_persistent_connections: + description: "Toggles the use of persistence for connections." + type: boolean + default: false + env: + - name: ANSIBLE_USE_PERSISTENT_CONNECTIONS + ini: + - section: defaults + key: use_persistent_connections + banner_timeout: + type: float + default: 30 + description: + - Configures, in seconds, the amount of time to wait for the SSH banner to be presented. + - This option is supported by paramiko version 1.15.0 or newer. + ini: + - section: paramiko_connection + key: banner_timeout + env: + - name: ANSIBLE_PARAMIKO_BANNER_TIMEOUT + timeout: + type: int + default: 10 + description: + - Number of seconds until the plugin gives up on failing to establish a TCP connection. + - This option is supported by paramiko version 2.2.0 or newer. 
+ ini: + - section: defaults + key: timeout + - section: ssh_connection + key: timeout + - section: paramiko_connection + key: timeout + env: + - name: ANSIBLE_TIMEOUT + - name: ANSIBLE_SSH_TIMEOUT + - name: ANSIBLE_PARAMIKO_TIMEOUT + vars: + - name: ansible_ssh_timeout + - name: ansible_paramiko_timeout + cli: + - name: timeout + lock_file_timeout: + type: int + default: 60 + description: Number of seconds until the plugin gives up on trying to write a lock file when writing SSH known host keys. + vars: + - name: ansible_lock_file_timeout + env: + - name: ANSIBLE_LOCK_FILE_TIMEOUT + private_key_file: + description: + - Path to private key file to use for authentication. + type: path + ini: + - section: defaults + key: private_key_file + - section: paramiko_connection + key: private_key_file + env: + - name: ANSIBLE_PRIVATE_KEY_FILE + - name: ANSIBLE_PARAMIKO_PRIVATE_KEY_FILE + vars: + - name: ansible_private_key_file + - name: ansible_ssh_private_key_file + - name: ansible_paramiko_private_key_file + cli: + - name: private_key_file + option: "--private-key" + user_known_hosts_file: + description: + - Path to the user known hosts file. + - Used to verify the ssh hosts keys. + type: path + default: ~/.ssh/known_hosts + ini: + - section: paramiko_connection + key: user_known_hosts_file + vars: + - name: ansible_paramiko_user_known_hosts_file + wsl_distribution: + description: + - WSL distribution name. + type: string + required: true + vars: + - name: wsl_distribution + wsl_user: + description: + - WSL distribution user. + type: string + vars: + - name: wsl_user + become_user: + description: + - WSL distribution user. + type: string + default: root + vars: + - name: become_user + - name: ansible_become_user + become: + description: + - Whether to use the user defined by O(become_user). 
+ type: bool + default: false + vars: + - name: become + - name: ansible_become +""" + +EXAMPLES = r""" +# ------------------------ +# Inventory: inventory.yml +# ------------------------ +--- +all: + children: + wsl: + hosts: + example-wsl-ubuntu: + ansible_host: 10.0.0.10 + wsl_distribution: ubuntu + wsl_user: ubuntu + vars: + ansible_connection: community.general.wsl + ansible_user: vagrant +# ---------------------- +# Playbook: playbook.yml +# ---------------------- +--- +- name: WSL Example + hosts: wsl + gather_facts: true + become: true + tasks: + - name: Ping + ansible.builtin.ping: + - name: Id (with become false) + become: false + changed_when: false + args: + executable: /bin/bash + ansible.builtin.shell: | + exec 2>&1 + set -x + echo "$0" + pwd + id + - name: Id (with become true) + changed_when: false + args: + executable: /bin/bash + ansible.builtin.shell: | + exec 2>&1 + set -x + echo "$0" + pwd + id + - name: Reboot + ansible.builtin.reboot: + boot_time_command: systemctl show -p ActiveEnterTimestamp init.scope +""" + +import io +import os +import pathlib +import shlex +import socket +import tempfile +import traceback +import typing as t + +from ansible.errors import ( + AnsibleAuthenticationFailure, + AnsibleConnectionFailure, + AnsibleError, +) +from ansible_collections.community.general.plugins.module_utils._filelock import FileLock, LockTimeout +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion +from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text +from ansible.playbook.play_context import PlayContext +from ansible.plugins.connection import ConnectionBase +from ansible.utils.display import Display +from ansible.utils.path import makedirs_safe +from binascii import hexlify +from subprocess import list2cmdline + +try: + import paramiko + PARAMIKO_IMPORT_ERR = None +except ImportError: + paramiko = None + PARAMIKO_IMPORT_ERR = traceback.format_exc() + + +if t.TYPE_CHECKING 
and PARAMIKO_IMPORT_ERR is None: + from paramiko import MissingHostKeyPolicy + from paramiko.client import SSHClient + from paramiko.pkey import PKey +else: + MissingHostKeyPolicy: type = object + SSHClient: type = object + PKey: type = object + + +display = Display() + + +def authenticity_msg(hostname: str, ktype: str, fingerprint: str) -> str: + msg = f""" + paramiko: The authenticity of host '{hostname}' can't be established. + The {ktype} key fingerprint is {fingerprint}. + Are you sure you want to continue connecting (yes/no)? + """ + return msg + + +class MyAddPolicy(MissingHostKeyPolicy): + """ + Based on AutoAddPolicy in paramiko so we can determine when keys are added + + and also prompt for input. + + Policy for automatically adding the hostname and new host key to the + local L{HostKeys} object, and saving it. This is used by L{SSHClient}. + """ + + def __init__(self, connection: Connection) -> None: + self.connection = connection + self._options = connection._options + + def missing_host_key(self, client: SSHClient, hostname: str, key: PKey) -> None: + + if all((self.connection.get_option('host_key_checking'), not self.connection.get_option('host_key_auto_add'))): + + fingerprint = hexlify(key.get_fingerprint()) + ktype = key.get_name() + + if self.connection.get_option('use_persistent_connections') or self.connection.force_persistence: + # don't print the prompt string since the user cannot respond + # to the question anyway + raise AnsibleError(authenticity_msg(hostname, ktype, fingerprint)[1:92]) + + inp = to_text( + display.prompt_until(authenticity_msg(hostname, ktype, fingerprint), private=False), + errors='surrogate_or_strict' + ) + + if inp.lower() not in ['yes', 'y', '']: + raise AnsibleError('host connection rejected by user') + + key._added_by_ansible_this_time = True + + # existing implementation below: + client._host_keys.add(hostname, key.get_name(), key) + + # host keys are actually saved in close() function below + # in order to control 
ordering. + + +class Connection(ConnectionBase): + """ SSH based connections (paramiko) to WSL """ + + transport = 'community.general.wsl' + _log_channel: str | None = None + + def __init__(self, play_context: PlayContext, new_stdin: io.TextIOWrapper | None = None, *args: t.Any, **kwargs: t.Any): + super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs) + + def _set_log_channel(self, name: str) -> None: + """ Mimic paramiko.SSHClient.set_log_channel """ + self._log_channel = name + + def _parse_proxy_command(self, port: int = 22) -> dict[str, t.Any]: + proxy_command = self.get_option('proxy_command') or None + + sock_kwarg = {} + if proxy_command: + replacers: t.Dict[str, str] = { + '%h': self.get_option('remote_addr'), + '%p': str(port), + '%r': self.get_option('remote_user') + } + for find, replace in replacers.items(): + proxy_command = proxy_command.replace(find, replace) + try: + sock_kwarg = {'sock': paramiko.ProxyCommand(proxy_command)} + display.vvv(f'CONFIGURE PROXY COMMAND FOR CONNECTION: {proxy_command}', host=self.get_option('remote_addr')) + except AttributeError: + display.warning('Paramiko ProxyCommand support unavailable. ' + 'Please upgrade to Paramiko 1.9.0 or newer. 
' + 'Not using configured ProxyCommand') + + return sock_kwarg + + def _connect(self) -> Connection: + """ activates the connection object """ + + if PARAMIKO_IMPORT_ERR is not None: + raise AnsibleError(f'paramiko is not installed: {to_native(PARAMIKO_IMPORT_ERR)}') + + port = self.get_option('port') + display.vvv(f'ESTABLISH PARAMIKO SSH CONNECTION FOR USER: {self.get_option("remote_user")} on PORT {to_text(port)} TO {self.get_option("remote_addr")}', + host=self.get_option('remote_addr')) + + ssh = paramiko.SSHClient() + + # Set pubkey and hostkey algorithms to disable, the only manipulation allowed currently + # is keeping or omitting rsa-sha2 algorithms + # default_keys: t.Tuple[str] = () + paramiko_preferred_pubkeys = getattr(paramiko.Transport, '_preferred_pubkeys', ()) + paramiko_preferred_hostkeys = getattr(paramiko.Transport, '_preferred_keys', ()) + use_rsa_sha2_algorithms = self.get_option('use_rsa_sha2_algorithms') + disabled_algorithms: t.Dict[str, t.Iterable[str]] = {} + if not use_rsa_sha2_algorithms: + if paramiko_preferred_pubkeys: + disabled_algorithms['pubkeys'] = tuple(a for a in paramiko_preferred_pubkeys if 'rsa-sha2' in a) + if paramiko_preferred_hostkeys: + disabled_algorithms['keys'] = tuple(a for a in paramiko_preferred_hostkeys if 'rsa-sha2' in a) + + # override paramiko's default logger name + if self._log_channel is not None: + ssh.set_log_channel(self._log_channel) + + self.keyfile = os.path.expanduser(self.get_option('user_known_hosts_file')) + + if self.get_option('host_key_checking'): + for ssh_known_hosts in ('/etc/ssh/ssh_known_hosts', '/etc/openssh/ssh_known_hosts', self.keyfile): + try: + ssh.load_system_host_keys(ssh_known_hosts) + break + except IOError: + pass # file was not found, but not required to function + except paramiko.hostkeys.InvalidHostKey as e: + raise AnsibleConnectionFailure(f'Invalid host key: {to_text(e.line)}') + try: + ssh.load_system_host_keys() + except paramiko.hostkeys.InvalidHostKey as e: + raise 
AnsibleConnectionFailure(f'Invalid host key: {to_text(e.line)}') + + ssh_connect_kwargs = self._parse_proxy_command(port) + ssh.set_missing_host_key_policy(MyAddPolicy(self)) + conn_password = self.get_option('password') + allow_agent = True + + if conn_password is not None: + allow_agent = False + + try: + key_filename = None + if self.get_option('private_key_file'): + key_filename = os.path.expanduser(self.get_option('private_key_file')) + + # paramiko 2.2 introduced auth_timeout parameter + if LooseVersion(paramiko.__version__) >= LooseVersion('2.2.0'): + ssh_connect_kwargs['auth_timeout'] = self.get_option('timeout') + + # paramiko 1.15 introduced banner timeout parameter + if LooseVersion(paramiko.__version__) >= LooseVersion('1.15.0'): + ssh_connect_kwargs['banner_timeout'] = self.get_option('banner_timeout') + + ssh.connect( + self.get_option('remote_addr').lower(), + username=self.get_option('remote_user'), + allow_agent=allow_agent, + look_for_keys=self.get_option('look_for_keys'), + key_filename=key_filename, + password=conn_password, + timeout=self.get_option('timeout'), + port=port, + disabled_algorithms=disabled_algorithms, + **ssh_connect_kwargs, + ) + except paramiko.ssh_exception.BadHostKeyException as e: + raise AnsibleConnectionFailure(f'host key mismatch for {to_text(e.hostname)}') + except paramiko.ssh_exception.AuthenticationException as e: + msg = f'Failed to authenticate: {e}' + raise AnsibleAuthenticationFailure(msg) + except Exception as e: + msg = to_text(e) + if 'PID check failed' in msg: + raise AnsibleError('paramiko version issue, please upgrade paramiko on the machine running ansible') + elif 'Private key file is encrypted' in msg: + msg = ( + f'ssh {self.get_option("remote_user")}@{self.get_option("remote_addr")}:{port} : ' + f'{msg}\nTo connect as a different user, use -u .'
+ ) + raise AnsibleConnectionFailure(msg) + else: + raise AnsibleConnectionFailure(msg) + self.ssh = ssh + self._connected = True + return self + + def _any_keys_added(self) -> bool: + for hostname, keys in self.ssh._host_keys.items(): + for keytype, key in keys.items(): + added_this_time = getattr(key, '_added_by_ansible_this_time', False) + if added_this_time: + return True + return False + + def _save_ssh_host_keys(self, filename: str) -> None: + """ + not using the paramiko save_ssh_host_keys function as we want to add new SSH keys at the bottom so folks + don't complain about it :) + """ + + if not self._any_keys_added(): + return + + path = os.path.expanduser('~/.ssh') + makedirs_safe(path) + + with open(filename, 'w') as f: + for hostname, keys in self.ssh._host_keys.items(): + for keytype, key in keys.items(): + # was f.write + added_this_time = getattr(key, '_added_by_ansible_this_time', False) + if not added_this_time: + f.write(f'{hostname} {keytype} {key.get_base64()}\n') + + for hostname, keys in self.ssh._host_keys.items(): + for keytype, key in keys.items(): + added_this_time = getattr(key, '_added_by_ansible_this_time', False) + if added_this_time: + f.write(f'{hostname} {keytype} {key.get_base64()}\n') + + def _build_wsl_command(self, cmd: str) -> str: + wsl_distribution = self.get_option('wsl_distribution') + become = self.get_option('become') + become_user = self.get_option('become_user') + if become and become_user: + wsl_user = become_user + else: + wsl_user = self.get_option('wsl_user') + args = ['wsl.exe', '--distribution', wsl_distribution] + if wsl_user: + args.extend(['--user', wsl_user]) + args.extend(['--']) + args.extend(shlex.split(cmd)) + if os.getenv('_ANSIBLE_TEST_WSL_CONNECTION_PLUGIN_Waeri5tepheeSha2fae8'): + return shlex.join(args) + return list2cmdline(args) # see https://github.com/python/cpython/blob/3.11/Lib/subprocess.py#L576 + + def exec_command(self, cmd: str, in_data: bytes | None = None, sudoable: bool = True) -> 
tuple[int, bytes, bytes]: + """ run a command on inside a WSL distribution """ + + cmd = self._build_wsl_command(cmd) + + super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable) + + bufsize = 4096 + + try: + self.ssh.get_transport().set_keepalive(5) + chan = self.ssh.get_transport().open_session() + except Exception as e: + text_e = to_text(e) + msg = 'Failed to open session' + if text_e: + msg += f': {text_e}' + raise AnsibleConnectionFailure(to_native(msg)) + + display.vvv(f'EXEC {cmd}', host=self.get_option('remote_addr')) + + cmd = to_bytes(cmd, errors='surrogate_or_strict') + + no_prompt_out = b'' + no_prompt_err = b'' + become_output = b'' + + try: + chan.exec_command(cmd) + if self.become and self.become.expect_prompt(): + password_prompt = False + become_success = False + while not (become_success or password_prompt): + display.debug('Waiting for Privilege Escalation input') + + chunk = chan.recv(bufsize) + display.debug(f'chunk is: {to_text(chunk)}') + if not chunk: + if b'unknown user' in become_output: + n_become_user = to_native(self.become.get_option('become_user')) + raise AnsibleError(f'user {n_become_user} does not exist') + else: + break + # raise AnsibleError('ssh connection closed waiting for password prompt') + become_output += chunk + + # need to check every line because we might get lectured + # and we might get the middle of a line in a chunk + for line in become_output.splitlines(True): + if self.become.check_success(line): + become_success = True + break + elif self.become.check_password_prompt(line): + password_prompt = True + break + + if password_prompt: + if self.become: + become_pass = self.become.get_option('become_pass') + chan.sendall(to_bytes(f"{become_pass}\n", errors='surrogate_or_strict')) + else: + raise AnsibleError('A password is required but none was supplied') + else: + no_prompt_out += become_output + no_prompt_err += become_output + + if in_data: + for i in range(0, len(in_data), bufsize): + 
chan.send(in_data[i:i + bufsize]) + chan.shutdown_write() + elif in_data == b'': + chan.shutdown_write() + + except socket.timeout: + raise AnsibleError(f'ssh timed out waiting for privilege escalation.\n{to_text(become_output)}') + + stdout = b''.join(chan.makefile('rb', bufsize)) + stderr = b''.join(chan.makefile_stderr('rb', bufsize)) + returncode = chan.recv_exit_status() + + # NB the full english error message is: + # 'wsl.exe' is not recognized as an internal or external command, + # operable program or batch file. + if "'wsl.exe' is not recognized" in stderr.decode('utf-8'): + raise AnsibleError( + f'wsl.exe not found in path of host: {to_text(self.get_option("remote_addr"))}') + + return (returncode, no_prompt_out + stdout, no_prompt_out + stderr) + + def put_file(self, in_path: str, out_path: str) -> None: + """ transfer a file from local to remote """ + + display.vvv(f'PUT {in_path} TO {out_path}', host=self.get_option('remote_addr')) + try: + with open(in_path, 'rb') as f: + data = f.read() + returncode, stdout, stderr = self.exec_command( + f"{self._shell.executable} -c {self._shell.quote(f'cat > {out_path}')}", + in_data=data, + sudoable=False) + if returncode != 0: + if 'cat: not found' in stderr.decode('utf-8'): + raise AnsibleError( + f'cat not found in path of WSL distribution: {to_text(self.get_option("wsl_distribution"))}') + raise AnsibleError( + f'{to_text(stdout)}\n{to_text(stderr)}') + except Exception as e: + raise AnsibleError( + f'error occurred while putting file from {in_path} to {out_path}!\n{to_text(e)}') + + def fetch_file(self, in_path: str, out_path: str) -> None: + """ save a remote file to the specified path """ + + display.vvv(f'FETCH {in_path} TO {out_path}', host=self.get_option('remote_addr')) + try: + returncode, stdout, stderr = self.exec_command( + f"{self._shell.executable} -c {self._shell.quote(f'cat {in_path}')}", + sudoable=False) + if returncode != 0: + if 'cat: not found' in stderr.decode('utf-8'): + raise 
AnsibleError( + f'cat not found in path of WSL distribution: {to_text(self.get_option("wsl_distribution"))}') + raise AnsibleError( + f'{to_text(stdout)}\n{to_text(stderr)}') + with open(out_path, 'wb') as f: + f.write(stdout) + except Exception as e: + raise AnsibleError( + f'error occurred while fetching file from {in_path} to {out_path}!\n{to_text(e)}') + + def reset(self) -> None: + """ reset the connection """ + + if not self._connected: + return + self.close() + self._connect() + + def close(self) -> None: + """ terminate the connection """ + + if self.get_option('host_key_checking') and self.get_option('record_host_keys') and self._any_keys_added(): + # add any new SSH host keys -- warning -- this could be slow + # (This doesn't acquire the connection lock because it needs + # to exclude only other known_hosts writers, not connections + # that are starting up.) + lockfile = os.path.basename(self.keyfile) + dirname = os.path.dirname(self.keyfile) + makedirs_safe(dirname) + tmp_keyfile_name = None + try: + with FileLock().lock_file(lockfile, dirname, self.get_option('lock_file_timeout')): + # just in case any were added recently + + self.ssh.load_system_host_keys() + self.ssh._host_keys.update(self.ssh._system_host_keys) + + # gather information about the current key file, so + # we can ensure the new file has the correct mode/owner + + key_dir = os.path.dirname(self.keyfile) + if os.path.exists(self.keyfile): + key_stat = os.stat(self.keyfile) + mode = key_stat.st_mode & 0o777 + uid = key_stat.st_uid + gid = key_stat.st_gid + else: + mode = 0o644 + uid = os.getuid() + gid = os.getgid() + + # Save the new keys to a temporary file and move it into place + # rather than rewriting the file. We set delete=False because + # the file will be moved into place rather than cleaned up. 
+ + with tempfile.NamedTemporaryFile(dir=key_dir, delete=False) as tmp_keyfile: + tmp_keyfile_name = tmp_keyfile.name + os.chmod(tmp_keyfile_name, mode) + os.chown(tmp_keyfile_name, uid, gid) + self._save_ssh_host_keys(tmp_keyfile_name) + + os.rename(tmp_keyfile_name, self.keyfile) + except LockTimeout: + raise AnsibleError( + f'writing lock file for {self.keyfile} ran in to the timeout of {self.get_option("lock_file_timeout")}s') + except paramiko.hostkeys.InvalidHostKey as e: + raise AnsibleConnectionFailure(f'Invalid host key: {e.line}') + except Exception as e: + # unable to save keys, including scenario when key was invalid + # and caught earlier + raise AnsibleError( + f'error occurred while writing SSH host keys!\n{to_text(e)}') + finally: + if tmp_keyfile_name is not None: + pathlib.Path(tmp_keyfile_name).unlink(missing_ok=True) + + self.ssh.close() + self._connected = False diff --git a/plugins/connection/zone.py b/plugins/connection/zone.py index 34827c7e37..49b3188f44 100644 --- a/plugins/connection/zone.py +++ b/plugins/connection/zone.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Based on local.py (c) 2012, Michael DeHaan # and chroot.py (c) 2013, Maykel Moya # and jail.py (c) 2013, Michael Scherer @@ -8,32 +7,32 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Ansible Core Team - name: zone - short_description: Run tasks in a zone instance +DOCUMENTATION = r""" +author: Ansible Core Team +name: zone +short_description: Run tasks in a zone instance +description: + - Run commands or put/fetch files to an existing zone. 
+options: + remote_addr: description: - - Run commands or put/fetch files to an existing zone - options: - remote_addr: - description: - - Zone identifier - default: inventory_hostname - vars: - - name: ansible_host - - name: ansible_zone_host -''' + - Zone identifier. + type: string + default: inventory_hostname + vars: + - name: ansible_host + - name: ansible_zone_host +""" import os import os.path import subprocess import traceback +from shlex import quote as shlex_quote from ansible.errors import AnsibleError -from ansible.module_utils.six.moves import shlex_quote from ansible.module_utils.common.process import get_bin_path from ansible.module_utils.common.text.converters import to_bytes from ansible.plugins.connection import ConnectionBase, BUFSIZE @@ -61,14 +60,14 @@ class Connection(ConnectionBase): self.zlogin_cmd = to_bytes(self._search_executable('zlogin')) if self.zone not in self.list_zones(): - raise AnsibleError("incorrect zone name %s" % self.zone) + raise AnsibleError(f"incorrect zone name {self.zone}") @staticmethod def _search_executable(executable): try: return get_bin_path(executable) except ValueError: - raise AnsibleError("%s command not found in PATH" % executable) + raise AnsibleError(f"{executable} command not found in PATH") def list_zones(self): process = subprocess.Popen([self.zoneadm_cmd, 'list', '-ip'], @@ -93,7 +92,7 @@ class Connection(ConnectionBase): # stdout, stderr = p.communicate() path = process.stdout.readlines()[0].split(':')[3] - return path + '/root' + return f"{path}/root" def _connect(self): """ connect to the zone; nothing to do here """ @@ -116,7 +115,7 @@ class Connection(ConnectionBase): local_cmd = [self.zlogin_cmd, self.zone, cmd] local_cmd = map(to_bytes, local_cmd) - display.vvv("EXEC %s" % (local_cmd), host=self.zone) + display.vvv(f"EXEC {local_cmd}", host=self.zone) p = subprocess.Popen(local_cmd, shell=False, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -139,7 +138,7 @@ class 
Connection(ConnectionBase): exist in any given chroot. So for now we're choosing "/" instead. This also happens to be the former default. - Can revisit using $HOME instead if it's a problem + Can revisit using $HOME instead if it is a problem """ if not remote_path.startswith(os.path.sep): remote_path = os.path.join(os.path.sep, remote_path) @@ -148,7 +147,7 @@ class Connection(ConnectionBase): def put_file(self, in_path, out_path): """ transfer a file from local to zone """ super(Connection, self).put_file(in_path, out_path) - display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.zone) + display.vvv(f"PUT {in_path} TO {out_path}", host=self.zone) out_path = shlex_quote(self._prefix_login_path(out_path)) try: @@ -158,27 +157,27 @@ class Connection(ConnectionBase): else: count = '' try: - p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file) + p = self._buffered_exec_command(f'dd of={out_path} bs={BUFSIZE}{count}', stdin=in_file) except OSError: raise AnsibleError("jail connection requires dd command in the jail") try: stdout, stderr = p.communicate() except Exception: traceback.print_exc() - raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}") if p.returncode != 0: - raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{stdout}\n{stderr}") except IOError: - raise AnsibleError("file or module does not exist at: %s" % in_path) + raise AnsibleError(f"file or module does not exist at: {in_path}") def fetch_file(self, in_path, out_path): """ fetch a file from zone to local """ super(Connection, self).fetch_file(in_path, out_path) - display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.zone) + display.vvv(f"FETCH {in_path} TO {out_path}", host=self.zone) in_path = 
shlex_quote(self._prefix_login_path(in_path)) try: - p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE)) + p = self._buffered_exec_command(f'dd if={in_path} bs={BUFSIZE}') except OSError: raise AnsibleError("zone connection requires dd command in the zone") @@ -190,10 +189,10 @@ class Connection(ConnectionBase): chunk = p.stdout.read(BUFSIZE) except Exception: traceback.print_exc() - raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}") stdout, stderr = p.communicate() if p.returncode != 0: - raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr)) + raise AnsibleError(f"failed to transfer file {in_path} to {out_path}:\n{stdout}\n{stderr}") def close(self): """ terminate the connection; nothing to do here """ diff --git a/plugins/doc_fragments/alicloud.py b/plugins/doc_fragments/alicloud.py index f464e178c7..f0083c9946 100644 --- a/plugins/doc_fragments/alicloud.py +++ b/plugins/doc_fragments/alicloud.py @@ -1,109 +1,97 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Alicloud only documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: alicloud_access_key: description: - - Alibaba Cloud access key. If not set then the value of environment variable C(ALICLOUD_ACCESS_KEY), - C(ALICLOUD_ACCESS_KEY_ID) will be used instead. + - Alibaba Cloud access key. If not set then the value of environment variable E(ALICLOUD_ACCESS_KEY), E(ALICLOUD_ACCESS_KEY_ID) + is used instead. 
aliases: ['access_key_id', 'access_key'] type: str alicloud_secret_key: description: - - Alibaba Cloud secret key. If not set then the value of environment variable C(ALICLOUD_SECRET_KEY), - C(ALICLOUD_SECRET_ACCESS_KEY) will be used instead. + - Alibaba Cloud secret key. If not set then the value of environment variable E(ALICLOUD_SECRET_KEY), E(ALICLOUD_SECRET_ACCESS_KEY) + is used instead. aliases: ['secret_access_key', 'secret_key'] type: str alicloud_region: description: - - The Alibaba Cloud region to use. If not specified then the value of environment variable - C(ALICLOUD_REGION), C(ALICLOUD_REGION_ID) will be used instead. + - The Alibaba Cloud region to use. If not specified then the value of environment variable E(ALICLOUD_REGION), E(ALICLOUD_REGION_ID) + is used instead. aliases: ['region', 'region_id'] required: true type: str alicloud_security_token: description: - - The Alibaba Cloud security token. If not specified then the value of environment variable - C(ALICLOUD_SECURITY_TOKEN) will be used instead. + - The Alibaba Cloud security token. If not specified then the value of environment variable E(ALICLOUD_SECURITY_TOKEN) + is used instead. aliases: ['security_token'] type: str alicloud_assume_role: description: - - If provided with a role ARN, Ansible will attempt to assume this role using the supplied credentials. - - The nested assume_role block supports I(alicloud_assume_role_arn), I(alicloud_assume_role_session_name), - I(alicloud_assume_role_session_expiration) and I(alicloud_assume_role_policy) + - If provided with a role ARN, Ansible attempts to assume this role using the supplied credentials. + - The nested assume_role block supports C(alicloud_assume_role_arn), C(alicloud_assume_role_session_name), C(alicloud_assume_role_session_expiration) + and C(alicloud_assume_role_policy). type: dict aliases: ['assume_role'] alicloud_assume_role_arn: description: - - The Alibaba Cloud role_arn. The ARN of the role to assume. 
If ARN is set to an empty string, - it does not perform role switching. It supports environment variable ALICLOUD_ASSUME_ROLE_ARN. - ansible will execute with provided credentials. + - The Alibaba Cloud C(role_arn). The ARN of the role to assume. If ARN is set to an empty string, it does not perform + role switching. It supports environment variable E(ALICLOUD_ASSUME_ROLE_ARN). Ansible executes with provided credentials. aliases: ['assume_role_arn'] type: str alicloud_assume_role_session_name: description: - - The Alibaba Cloud session_name. The session name to use when assuming the role. If omitted, - 'ansible' is passed to the AssumeRole call as session name. It supports environment variable - ALICLOUD_ASSUME_ROLE_SESSION_NAME + - The Alibaba Cloud session_name. The session name to use when assuming the role. If omitted, 'ansible' is passed to + the AssumeRole call as session name. It supports environment variable E(ALICLOUD_ASSUME_ROLE_SESSION_NAME). aliases: ['assume_role_session_name'] type: str alicloud_assume_role_session_expiration: description: - - The Alibaba Cloud session_expiration. The time after which the established session for assuming - role expires. Valid value range 900-3600 seconds. Default to 3600 (in this case Alicloud use own default - value). It supports environment variable ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION + - The Alibaba Cloud C(session_expiration). The time after which the established session for assuming role expires. Valid + value range 900-3600 seconds. Default to 3600 (in this case Alicloud use own default value). It supports environment + variable E(ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION). aliases: ['assume_role_session_expiration'] type: int ecs_role_name: description: - - The RAM Role Name attached on a ECS instance for API operations. You can retrieve this from the 'Access Control' - section of the Alibaba Cloud console. 
- - If you're running Ansible from an ECS instance with RAM Instance using RAM Role, Ansible will just access the - metadata U(http://100.100.100.200/latest/meta-data/ram/security-credentials/) to obtain the STS - credential. This is a preferred approach over any other when running in ECS as you can avoid hard coding - credentials. Instead these are leased on-the-fly by Ansible which reduces the chance of leakage. + - The RAM Role Name attached on an ECS instance for API operations. You can retrieve this from the 'Access Control' section + of the Alibaba Cloud console. + - If you are running Ansible from an ECS instance with RAM Instance using RAM Role, Ansible just accesses the metadata + U(http://100.100.100.200/latest/meta-data/ram/security-credentials/) to obtain the STS credential. + This is a preferred approach over any other when running in ECS as you can avoid hard coding credentials. Instead + these are leased on-the-fly by Ansible which reduces the chance of leakage. aliases: ['role_name'] type: str profile: description: - - This is the Alicloud profile name as set in the shared credentials file. It can also be sourced from the - ALICLOUD_PROFILE environment variable. + - This is the Alicloud profile name as set in the shared credentials file. It can also be sourced from the E(ALICLOUD_PROFILE) + environment variable. type: str shared_credentials_file: description: - - This is the path to the shared credentials file. It can also be sourced from the ALICLOUD_SHARED_CREDENTIALS_FILE + - This is the path to the shared credentials file. It can also be sourced from the E(ALICLOUD_SHARED_CREDENTIALS_FILE) environment variable. - - If this is not set and a profile is specified, ~/.aliyun/config.json will be used. + - If this is not set and a profile is specified, C(~/.aliyun/config.json) is used. 
type: str author: - - "He Guimin (@xiaozhu36)" + - "He Guimin (@xiaozhu36)" requirements: - - "python >= 3.6" + - "Python >= 3.6" notes: - - If parameters are not set within the module, the following - environment variables can be used in decreasing order of precedence - C(ALICLOUD_ACCESS_KEY) or C(ALICLOUD_ACCESS_KEY_ID), - C(ALICLOUD_SECRET_KEY) or C(ALICLOUD_SECRET_ACCESS_KEY), - C(ALICLOUD_REGION) or C(ALICLOUD_REGION_ID), - C(ALICLOUD_SECURITY_TOKEN), - C(ALICLOUD_ECS_ROLE_NAME), - C(ALICLOUD_SHARED_CREDENTIALS_FILE), - C(ALICLOUD_PROFILE), - C(ALICLOUD_ASSUME_ROLE_ARN), - C(ALICLOUD_ASSUME_ROLE_SESSION_NAME), - C(ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION), - - C(ALICLOUD_REGION) or C(ALICLOUD_REGION_ID) can be typically be used to specify the - ALICLOUD region, when required, but this can also be configured in the footmark config file -''' + - If parameters are not set within the module, the following environment variables can be used in decreasing order of precedence + E(ALICLOUD_ACCESS_KEY) or E(ALICLOUD_ACCESS_KEY_ID), E(ALICLOUD_SECRET_KEY) or E(ALICLOUD_SECRET_ACCESS_KEY), E(ALICLOUD_REGION) + or E(ALICLOUD_REGION_ID), E(ALICLOUD_SECURITY_TOKEN), E(ALICLOUD_ECS_ROLE_NAME), E(ALICLOUD_SHARED_CREDENTIALS_FILE), + E(ALICLOUD_PROFILE), E(ALICLOUD_ASSUME_ROLE_ARN), E(ALICLOUD_ASSUME_ROLE_SESSION_NAME), E(ALICLOUD_ASSUME_ROLE_SESSION_EXPIRATION). + - E(ALICLOUD_REGION) or E(ALICLOUD_REGION_ID) can typically be used to specify the Alicloud region, when required, but + this can also be configured in the footmark config file. 
+""" diff --git a/plugins/doc_fragments/attributes.py b/plugins/doc_fragments/attributes.py index 9b8488e0a5..fdafe1aeaa 100644 --- a/plugins/doc_fragments/attributes.py +++ b/plugins/doc_fragments/attributes.py @@ -1,93 +1,91 @@ -# -*- coding: utf-8 -*- # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: {} attributes: - check_mode: - description: Can run in C(check_mode) and return changed status prediction without modifying target. - diff_mode: - description: Will return details on what has changed (or possibly needs changing in C(check_mode)), when in diff mode. -''' + check_mode: + description: Can run in C(check_mode) and return changed status prediction without modifying target. + diff_mode: + description: Returns details on what has changed (or possibly needs changing in C(check_mode)), when in diff mode. +""" - PLATFORM = r''' + PLATFORM = r""" options: {} attributes: - platform: - description: Target OS/families that can be operated against. - support: N/A -''' + platform: + description: Target OS/families that can be operated against. + support: N/A +""" # Should be used together with the standard fragment INFO_MODULE = r''' options: {} attributes: - check_mode: - support: full - details: - - This action does not modify state. - diff_mode: - support: N/A - details: - - This action does not modify state. + check_mode: + support: full + details: + - This action does not modify state. + diff_mode: + support: N/A + details: + - This action does not modify state. ''' - CONN = r''' + CONN = r""" options: {} attributes: - become: - description: Is usable alongside C(become) keywords. 
- connection: - description: Uses the target's configured connection information to execute code on it. - delegation: - description: Can be used in conjunction with C(delegate_to) and related keywords. -''' + become: + description: Is usable alongside C(become) keywords. + connection: + description: Uses the target's configured connection information to execute code on it. + delegation: + description: Can be used in conjunction with C(delegate_to) and related keywords. +""" - FACTS = r''' + FACTS = r""" options: {} attributes: - facts: - description: Action returns an C(ansible_facts) dictionary that will update existing host facts. -''' + facts: + description: Action returns an C(ansible_facts) dictionary that updates existing host facts. +""" # Should be used together with the standard fragment and the FACTS fragment FACTS_MODULE = r''' options: {} attributes: - check_mode: - support: full - details: - - This action does not modify state. - diff_mode: - support: N/A - details: - - This action does not modify state. - facts: - support: full + check_mode: + support: full + details: + - This action does not modify state. + diff_mode: + support: N/A + details: + - This action does not modify state. + facts: + support: full ''' - FILES = r''' + FILES = r""" options: {} attributes: - safe_file_operations: - description: Uses Ansible's strict file operation functions to ensure proper permissions and avoid data corruption. -''' + safe_file_operations: + description: Uses Ansible's strict file operation functions to ensure proper permissions and avoid data corruption. +""" - FLOW = r''' + FLOW = r""" options: {} attributes: - action: - description: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. - async: - description: Supports being used with the C(async) keyword. -''' + action: + description: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller. 
+ async: + description: Supports being used with the C(async) keyword. +""" diff --git a/plugins/doc_fragments/auth_basic.py b/plugins/doc_fragments/auth_basic.py index 674fb1e9ab..3d99466165 100644 --- a/plugins/doc_fragments/auth_basic.py +++ b/plugins/doc_fragments/auth_basic.py @@ -1,32 +1,30 @@ -# -*- coding: utf-8 -*- # Copyright (c) Ansible project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard files documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: api_url: description: - - The resolvable endpoint for the API + - The resolvable endpoint for the API. type: str api_username: description: - - The username to use for authentication against the API + - The username to use for authentication against the API. type: str api_password: description: - - The password to use for authentication against the API + - The password to use for authentication against the API. type: str validate_certs: description: - - Whether or not to validate SSL certs when supplying a https endpoint. + - Whether or not to validate SSL certs when supplying a HTTPS endpoint. 
type: bool default: true -''' +""" diff --git a/plugins/doc_fragments/bitbucket.py b/plugins/doc_fragments/bitbucket.py index 703bb412a1..c96a010e71 100644 --- a/plugins/doc_fragments/bitbucket.py +++ b/plugins/doc_fragments/bitbucket.py @@ -1,44 +1,42 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2019, Evgeniy Krysanov # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: client_id: description: - The OAuth consumer key. - - If not set the environment variable C(BITBUCKET_CLIENT_ID) will be used. + - If not set the environment variable E(BITBUCKET_CLIENT_ID) is used. type: str client_secret: description: - The OAuth consumer secret. - - If not set the environment variable C(BITBUCKET_CLIENT_SECRET) will be used. + - If not set the environment variable E(BITBUCKET_CLIENT_SECRET) is used. type: str user: description: - The username. - - If not set the environment variable C(BITBUCKET_USERNAME) will be used. - - I(username) is an alias of I(user) since community.genreal 6.0.0. It was an alias of I(workspace) before. + - If not set the environment variable E(BITBUCKET_USERNAME) is used. + - O(ignore:username) is an alias of O(user) since community.general 6.0.0. It was an alias of O(workspace) before. type: str version_added: 4.0.0 - aliases: [ username ] + aliases: [username] password: description: - The App password. - - If not set the environment variable C(BITBUCKET_PASSWORD) will be used. + - If not set the environment variable E(BITBUCKET_PASSWORD) is used. type: str version_added: 4.0.0 notes: - Bitbucket OAuth consumer key and secret can be obtained from Bitbucket profile -> Settings -> Access Management -> OAuth. 
- Bitbucket App password can be created from Bitbucket profile -> Personal Settings -> App passwords. - If both OAuth and Basic Auth credentials are passed, OAuth credentials take precedence. -''' +""" diff --git a/plugins/doc_fragments/consul.py b/plugins/doc_fragments/consul.py new file mode 100644 index 0000000000..fd9c1a6e6c --- /dev/null +++ b/plugins/doc_fragments/consul.py @@ -0,0 +1,55 @@ +# Copyright (c) Ansible project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +class ModuleDocFragment: + # Common parameters for Consul modules + DOCUMENTATION = r""" +options: + host: + description: + - Host of the Consul agent. + default: localhost + type: str + port: + type: int + description: + - The port on which the consul agent is running. + default: 8500 + scheme: + description: + - The protocol scheme on which the Consul agent is running. Defaults to V(http) and can be set to V(https) for secure + connections. + default: http + type: str + validate_certs: + type: bool + description: + - Whether to verify the TLS certificate of the Consul agent. + default: true + ca_path: + description: + - The CA bundle to use for https connections. + type: str +""" + + TOKEN = r""" +options: + token: + description: + - The token to use for authorization. + type: str +""" + + ACTIONGROUP_CONSUL = r""" +options: {} +attributes: + action_group: + description: Use C(group/community.general.consul) in C(module_defaults) to set defaults for this module. 
+ support: full + membership: + - community.general.consul +""" diff --git a/plugins/doc_fragments/dimensiondata.py b/plugins/doc_fragments/dimensiondata.py index f8372431e0..1804c3c7ba 100644 --- a/plugins/doc_fragments/dimensiondata.py +++ b/plugins/doc_fragments/dimensiondata.py @@ -1,11 +1,9 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2016, Dimension Data # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations # Authors: # - Adam Friedman @@ -14,28 +12,27 @@ __metaclass__ = type class ModuleDocFragment(object): # Dimension Data doc fragment - DOCUMENTATION = r''' - + DOCUMENTATION = r""" options: region: description: - The target region. - - Regions are defined in Apache libcloud project [libcloud/common/dimensiondata.py] - - They are also listed in U(https://libcloud.readthedocs.io/en/latest/compute/drivers/dimensiondata.html) - - Note that the default value "na" stands for "North America". - - The module prepends 'dd-' to the region choice. + - Regions are defined in Apache libcloud project [libcloud/common/dimensiondata.py]. + - They are also listed in U(https://libcloud.readthedocs.io/en/latest/compute/drivers/dimensiondata.html). + - Note that the default value C(na) stands for "North America". + - The module prepends C(dd-) to the region choice. type: str default: na mcp_user: description: - The username used to authenticate to the CloudControl API. - - If not specified, will fall back to C(MCP_USER) from environment variable or C(~/.dimensiondata). + - If not specified, falls back to E(MCP_USER) from environment variable or C(~/.dimensiondata). type: str mcp_password: description: - The password used to authenticate to the CloudControl API. 
- - If not specified, will fall back to C(MCP_PASSWORD) from environment variable or C(~/.dimensiondata). - - Required if I(mcp_user) is specified. + - If not specified, falls back to E(MCP_PASSWORD) from environment variable or C(~/.dimensiondata). + - Required if O(mcp_user) is specified. type: str location: description: @@ -44,8 +41,8 @@ options: required: true validate_certs: description: - - If C(false), SSL certificates will not be validated. + - If V(false), SSL certificates are not validated. - This should only be used on private instances of the CloudControl API that use self-signed certificates. type: bool default: true -''' +""" diff --git a/plugins/doc_fragments/dimensiondata_wait.py b/plugins/doc_fragments/dimensiondata_wait.py index d371528396..40b3a1d6e8 100644 --- a/plugins/doc_fragments/dimensiondata_wait.py +++ b/plugins/doc_fragments/dimensiondata_wait.py @@ -1,11 +1,9 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2016, Dimension Data # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations # Authors: # - Adam Friedman @@ -14,8 +12,7 @@ __metaclass__ = type class ModuleDocFragment(object): # Dimension Data ("wait-for-completion" parameters) doc fragment - DOCUMENTATION = r''' - + DOCUMENTATION = r""" options: wait: description: @@ -25,13 +22,13 @@ options: wait_time: description: - The maximum amount of time (in seconds) to wait for the task to complete. - - Only applicable if I(wait=true). + - Only applicable if O(wait=true). type: int default: 600 wait_poll_interval: description: - The amount of time (in seconds) to wait between checks for task completion. - - Only applicable if I(wait=true). + - Only applicable if O(wait=true). 
type: int default: 2 - ''' +""" diff --git a/plugins/doc_fragments/django.py b/plugins/doc_fragments/django.py new file mode 100644 index 0000000000..f62e2224d8 --- /dev/null +++ b/plugins/doc_fragments/django.py @@ -0,0 +1,80 @@ +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +class ModuleDocFragment(object): + DOCUMENTATION = r""" +options: + venv: + description: + - Use the Python interpreter from this virtual environment. + - Pass the path to the root of the virtualenv, not the C(bin/) directory nor the C(python) executable. + type: path + settings: + description: + - Specifies the settings module to use. + - The value is passed as is to the C(--settings) argument in C(django-admin). + type: str + required: true + pythonpath: + description: + - Adds the given filesystem path to the Python import search path. + - The value is passed as is to the C(--pythonpath) argument in C(django-admin). + type: path + traceback: + description: + - Provides a full stack trace in the output when a C(CommandError) is raised. + type: bool + verbosity: + description: + - Specifies the amount of notification and debug information in the output of C(django-admin). + type: int + choices: [0, 1, 2, 3] + skip_checks: + description: + - Skips running system checks prior to running the command. + type: bool + + +notes: + - The C(django-admin) command is always executed using the C(C) locale, and the option C(--no-color) is always passed. +seealso: + - name: django-admin and manage.py in official Django documentation + description: >- + Refer to this documentation for the builtin commands and options of C(django-admin). Please make sure that you select + the right version of Django in the version selector on that page. 
+ link: https://docs.djangoproject.com/en/5.0/ref/django-admin/ +""" + + DATABASE = r""" +options: + database: + description: + - Specify the database to be used. + type: str + default: default +""" + + DATA = r""" +options: + excludes: + description: + - Applications or models to be excluded. + - Format must be either V(app_label) or V(app_label.ModelName). + type: list + elements: str + format: + description: + - Serialization format of the output data. + type: str + default: json + choices: [xml, json, jsonl, yaml] +notes: + - As it is now, the module is B(not idempotent). Ensuring idempotency for this case can be a bit tricky, because it would + amount to ensuring beforehand that all the data in the fixture file is already in the database, which is not a trivial feat. + Unfortunately, neither C(django loaddata) nor C(django dumpdata) have a C(--dry-run) option, so the only way to know whether + there is a change or not is to actually load or dump the data. +""" diff --git a/plugins/doc_fragments/emc.py b/plugins/doc_fragments/emc.py index e9e57a2c10..9268b7fc42 100644 --- a/plugins/doc_fragments/emc.py +++ b/plugins/doc_fragments/emc.py @@ -1,46 +1,34 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Luca Lorenzetto (@remix_tj) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): - DOCUMENTATION = r''' -options: - - See respective platform section for more details -requirements: - - See respective platform section for more details -notes: - - Ansible modules are available for EMC VNX. -''' - # Documentation fragment for VNX (emc_vnx) EMC_VNX = r''' options: - sp_address: - description: - - Address of the SP of target/secondary storage. 
- type: str - required: true - sp_user: - description: - - Username for accessing SP. - type: str - default: sysadmin - sp_password: - description: - - password for accessing SP. - type: str - default: sysadmin + sp_address: + description: + - Address of the SP of target/secondary storage. + type: str + required: true + sp_user: + description: + - Username for accessing SP. + type: str + default: sysadmin + sp_password: + description: + - password for accessing SP. + type: str + default: sysadmin requirements: - An EMC VNX Storage device. - - Ansible 2.7. - - storops (0.5.10 or greater). Install using 'pip install storops'. + - storops (0.5.10 or greater). Install using C(pip install storops). notes: - - The modules prefixed with emc_vnx are built to support the EMC VNX storage platform. + - The modules prefixed with C(emc_vnx) are built to support the EMC VNX storage platform. ''' diff --git a/plugins/doc_fragments/gitlab.py b/plugins/doc_fragments/gitlab.py index 705a93c023..af7a527a81 100644 --- a/plugins/doc_fragments/gitlab.py +++ b/plugins/doc_fragments/gitlab.py @@ -1,16 +1,14 @@ -# -*- coding: utf-8 -*- # Copyright (c) Ansible project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard files documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" requirements: - requests (Python library U(https://pypi.org/project/requests/)) @@ -29,4 +27,9 @@ options: - GitLab CI job token for logging in. type: str version_added: 4.2.0 -''' + ca_path: + description: + - The CA certificates bundle to use to verify GitLab server certificate. 
+ type: str + version_added: 8.1.0 +""" diff --git a/plugins/doc_fragments/hpe3par.py b/plugins/doc_fragments/hpe3par.py index 96e53846e1..e126c63c56 100644 --- a/plugins/doc_fragments/hpe3par.py +++ b/plugins/doc_fragments/hpe3par.py @@ -1,36 +1,33 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # HPE 3PAR doc fragment - DOCUMENTATION = ''' + DOCUMENTATION = r""" options: - storage_system_ip: - description: - - The storage system IP address. - type: str - required: true - storage_system_password: - description: - - The storage system password. - type: str - required: true - storage_system_username: - description: - - The storage system user name. - type: str - required: true + storage_system_ip: + description: + - The storage system IP address. + type: str + required: true + storage_system_password: + description: + - The storage system password. + type: str + required: true + storage_system_username: + description: + - The storage system user name. + type: str + required: true requirements: - - hpe3par_sdk >= 1.0.2. Install using 'pip install hpe3par_sdk' + - hpe3par_sdk >= 1.0.2. Install using C(pip install hpe3par_sdk). - WSAPI service should be enabled on the 3PAR storage array. notes: - - check_mode not supported - ''' +""" diff --git a/plugins/doc_fragments/hwc.py b/plugins/doc_fragments/hwc.py index d3cebb6dbc..99362243ec 100644 --- a/plugins/doc_fragments/hwc.py +++ b/plugins/doc_fragments/hwc.py @@ -1,66 +1,57 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Huawei Inc. 
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # HWC doc fragment. - DOCUMENTATION = ''' + DOCUMENTATION = r""" options: - identity_endpoint: - description: - - The Identity authentication URL. - type: str - required: true - user: - description: - - The user name to login with (currently only user names are - supported, and not user IDs). - type: str - required: true - password: - description: - - The password to login with. - type: str - required: true - domain: - description: - - The name of the Domain to scope to (Identity v3). - (currently only domain names are supported, and not domain IDs). - type: str - required: true - project: - description: - - The name of the Tenant (Identity v2) or Project (Identity v3). - (currently only project names are supported, and not - project IDs). - type: str - required: true - region: - description: - - The region to which the project belongs. - type: str - id: - description: - - The id of resource to be managed. - type: str + identity_endpoint: + description: + - The Identity authentication URL. + type: str + required: true + user: + description: + - The user name to login with. + - Currently only user names are supported, and not user IDs. + type: str + required: true + password: + description: + - The password to login with. + type: str + required: true + domain: + description: + - The name of the Domain to scope to (Identity v3). + - Currently only domain names are supported, and not domain IDs. + type: str + required: true + project: + description: + - The name of the Tenant (Identity v2) or Project (Identity v3). + - Currently only project names are supported, and not project IDs. 
+ type: str + required: true + region: + description: + - The region to which the project belongs. + type: str + id: + description: + - The ID of resource to be managed. + type: str notes: - - For authentication, you can set identity_endpoint using the - C(ANSIBLE_HWC_IDENTITY_ENDPOINT) env variable. - - For authentication, you can set user using the - C(ANSIBLE_HWC_USER) env variable. - - For authentication, you can set password using the C(ANSIBLE_HWC_PASSWORD) env - variable. - - For authentication, you can set domain using the C(ANSIBLE_HWC_DOMAIN) env - variable. - - For authentication, you can set project using the C(ANSIBLE_HWC_PROJECT) env - variable. - - For authentication, you can set region using the C(ANSIBLE_HWC_REGION) env variable. - - Environment variables values will only be used if the playbook values are - not set. -''' + - For authentication, you can set identity_endpoint using the E(ANSIBLE_HWC_IDENTITY_ENDPOINT) environment variable. + - For authentication, you can set user using the E(ANSIBLE_HWC_USER) environment variable. + - For authentication, you can set password using the E(ANSIBLE_HWC_PASSWORD) environment variable. + - For authentication, you can set domain using the E(ANSIBLE_HWC_DOMAIN) environment variable. + - For authentication, you can set project using the E(ANSIBLE_HWC_PROJECT) environment variable. + - For authentication, you can set region using the E(ANSIBLE_HWC_REGION) environment variable. + - Environment variables values are only used when the playbook values are not set. 
+""" diff --git a/plugins/doc_fragments/ibm_storage.py b/plugins/doc_fragments/ibm_storage.py index ff38c3fc7c..ab61cd51c1 100644 --- a/plugins/doc_fragments/ibm_storage.py +++ b/plugins/doc_fragments/ibm_storage.py @@ -1,38 +1,34 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, IBM CORPORATION # Author(s): Tzur Eliyahu # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # ibm_storage documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: - username: - description: - - Management user on the spectrum accelerate storage system. - type: str - required: true - password: - description: - - Password for username on the spectrum accelerate storage system. - type: str - required: true - endpoints: - description: - - The hostname or management IP of Spectrum Accelerate storage system. - type: str - required: true + username: + description: + - Management user on the Spectrum Accelerate storage system. + type: str + required: true + password: + description: + - Password for username on the Spectrum Accelerate storage system. + type: str + required: true + endpoints: + description: + - The hostname or management IP of Spectrum Accelerate storage system. + type: str + required: true notes: - - This module requires pyxcli python library. - Use 'pip install pyxcli' in order to get pyxcli. + - This module requires pyxcli python library. Use C(pip install pyxcli) in order to get pyxcli. 
requirements: - - python >= 2.7 - pyxcli -''' +""" diff --git a/plugins/doc_fragments/influxdb.py b/plugins/doc_fragments/influxdb.py index 6aedd5ad39..7f0688b868 100644 --- a/plugins/doc_fragments/influxdb.py +++ b/plugins/doc_fragments/influxdb.py @@ -1,85 +1,80 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017, Ansible Project # Copyright (c) 2017, Abhijeet Kasurde (akasurde@redhat.com) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Parameters for influxdb modules - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: hostname: description: - - The hostname or IP address on which InfluxDB server is listening. - - Since Ansible 2.5, defaulted to localhost. + - The hostname or IP address on which InfluxDB server is listening. type: str default: localhost username: description: - - Username that will be used to authenticate against InfluxDB server. - - Alias C(login_username) added in Ansible 2.5. + - Username that is used to authenticate against InfluxDB server. type: str default: root - aliases: [ login_username ] + aliases: [login_username] password: description: - - Password that will be used to authenticate against InfluxDB server. - - Alias C(login_password) added in Ansible 2.5. + - Password that is used to authenticate against InfluxDB server. type: str default: root - aliases: [ login_password ] + aliases: [login_password] port: description: - - The port on which InfluxDB server is listening + - The port on which InfluxDB server is listening. type: int default: 8086 path: description: - - The path on which InfluxDB server is accessible - - Only available when using python-influxdb >= 5.1.0 + - The path on which InfluxDB server is accessible. 
+ - Only available when using python-influxdb >= 5.1.0. type: str default: '' version_added: '0.2.0' validate_certs: description: - - If set to C(false), the SSL certificates will not be validated. - - This should only set to C(false) used on personally controlled sites using self-signed certificates. + - If set to V(false), the SSL certificates are not validated. + - This should only set to V(false) used on personally controlled sites using self-signed certificates. type: bool default: true ssl: description: - - Use https instead of http to connect to InfluxDB server. + - Use https instead of http to connect to InfluxDB server. type: bool default: false timeout: description: - - Number of seconds Requests will wait for client to establish a connection. + - Number of seconds Requests waits for client to establish a connection. type: int retries: description: - - Number of retries client will try before aborting. - - C(0) indicates try until success. - - Only available when using python-influxdb >= 4.1.0 + - Number of retries client performs before aborting. + - V(0) indicates try until success. + - Only available when using C(python-influxdb) >= 4.1.0. type: int default: 3 use_udp: description: - - Use UDP to connect to InfluxDB server. + - Use UDP to connect to InfluxDB server. type: bool default: false udp_port: description: - - UDP port to connect to InfluxDB server. + - UDP port to connect to InfluxDB server. type: int default: 4444 proxies: description: - - HTTP(S) proxy to use for Requests to connect to InfluxDB server. + - HTTP(S) proxy to use for Requests to connect to InfluxDB server. 
type: dict default: {} -''' +""" diff --git a/plugins/doc_fragments/ipa.py b/plugins/doc_fragments/ipa.py index 5051c55390..0b740ae8ed 100644 --- a/plugins/doc_fragments/ipa.py +++ b/plugins/doc_fragments/ipa.py @@ -1,76 +1,83 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017-18, Ansible Project # Copyright (c) 2017-18, Abhijeet Kasurde (akasurde@redhat.com) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Parameters for FreeIPA/IPA modules - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: ipa_port: description: - - Port of FreeIPA / IPA server. - - If the value is not specified in the task, the value of environment variable C(IPA_PORT) will be used instead. - - If both the environment variable C(IPA_PORT) and the value are not specified in the task, then default value is set. - - Environment variable fallback mechanism is added in Ansible 2.5. + - Port of FreeIPA / IPA server. + - If the value is not specified in the task, the value of environment variable E(IPA_PORT) is used instead. + - If both the environment variable E(IPA_PORT) and the value are not specified in the task, then default value is set. type: int default: 443 ipa_host: description: - - IP or hostname of IPA server. - - If the value is not specified in the task, the value of environment variable C(IPA_HOST) will be used instead. - - If both the environment variable C(IPA_HOST) and the value are not specified in the task, then DNS will be used to try to discover the FreeIPA server. - - The relevant entry needed in FreeIPA is the 'ipa-ca' entry. - - If neither the DNS entry, nor the environment C(IPA_HOST), nor the value are available in the task, then the default value will be used. 
- - Environment variable fallback mechanism is added in Ansible 2.5. + - IP or hostname of IPA server. + - If the value is not specified in the task, the value of environment variable E(IPA_HOST) is used instead. + - If both the environment variable E(IPA_HOST) and the value are not specified in the task, then DNS is used to try + to discover the FreeIPA server. + - The relevant entry needed in FreeIPA is the C(ipa-ca) entry. + - If neither the DNS entry, nor the environment E(IPA_HOST), nor the value are available in the task, then the default + value is used. type: str default: ipa.example.com ipa_user: description: - - Administrative account used on IPA server. - - If the value is not specified in the task, the value of environment variable C(IPA_USER) will be used instead. - - If both the environment variable C(IPA_USER) and the value are not specified in the task, then default value is set. - - Environment variable fallback mechanism is added in Ansible 2.5. + - Administrative account used on IPA server. + - If the value is not specified in the task, the value of environment variable E(IPA_USER) is used instead. + - If both the environment variable E(IPA_USER) and the value are not specified in the task, then default value is set. type: str default: admin ipa_pass: description: - - Password of administrative user. - - If the value is not specified in the task, the value of environment variable C(IPA_PASS) will be used instead. - - Note that if the 'urllib_gssapi' library is available, it is possible to use GSSAPI to authenticate to FreeIPA. - - If the environment variable C(KRB5CCNAME) is available, the module will use this kerberos credentials cache to authenticate to the FreeIPA server. - - If the environment variable C(KRB5_CLIENT_KTNAME) is available, and C(KRB5CCNAME) is not; the module will use this kerberos keytab to authenticate. - - If GSSAPI is not available, the usage of 'ipa_pass' is required. 
- - Environment variable fallback mechanism is added in Ansible 2.5. + - Password of administrative user. + - If the value is not specified in the task, the value of environment variable E(IPA_PASS) is used instead. + - Note that if the C(urllib_gssapi) library is available, it is possible to use GSSAPI to authenticate to FreeIPA. + - If the environment variable E(KRB5CCNAME) is available, the module uses this Kerberos credentials cache to authenticate + to the FreeIPA server. + - If the environment variable E(KRB5_CLIENT_KTNAME) is available, and E(KRB5CCNAME) is not; the module uses this Kerberos + keytab to authenticate. + - If GSSAPI is not available, the usage of O(ipa_pass) is required. type: str ipa_prot: description: - - Protocol used by IPA server. - - If the value is not specified in the task, the value of environment variable C(IPA_PROT) will be used instead. - - If both the environment variable C(IPA_PROT) and the value are not specified in the task, then default value is set. - - Environment variable fallback mechanism is added in Ansible 2.5. + - Protocol used by IPA server. + - If the value is not specified in the task, the value of environment variable E(IPA_PROT) is used instead. + - If both the environment variable E(IPA_PROT) and the value are not specified in the task, then default value is set. type: str - choices: [ http, https ] + choices: [http, https] default: https validate_certs: description: - - This only applies if C(ipa_prot) is I(https). - - If set to C(false), the SSL certificates will not be validated. - - This should only set to C(false) used on personally controlled sites using self-signed certificates. + - This only applies if O(ipa_prot) is V(https). + - If set to V(false), the SSL certificates are not validated. + - This should only set to V(false) used on personally controlled sites using self-signed certificates. type: bool default: true ipa_timeout: description: - - Specifies idle timeout (in seconds) for the connection. 
- - For bulk operations, you may want to increase this in order to avoid timeout from IPA server. - - If the value is not specified in the task, the value of environment variable C(IPA_TIMEOUT) will be used instead. - - If both the environment variable C(IPA_TIMEOUT) and the value are not specified in the task, then default value is set. + - Specifies idle timeout (in seconds) for the connection. + - For bulk operations, you may want to increase this in order to avoid timeout from IPA server. + - If the value is not specified in the task, the value of environment variable E(IPA_TIMEOUT) is used instead. + - If both the environment variable E(IPA_TIMEOUT) and the value are not specified in the task, then default value is + set. type: int default: 10 -''' +""" + + CONNECTION_NOTES = r""" +options: {} +notes: + - This module uses JSON-RPC over HTTP(S) to communicate with the FreeIPA server. + If you need to enroll the managed node into FreeIPA realm, you might want to consider using the collection + L(freeipa.ansible_freeipa, https://galaxy.ansible.com/ui/repo/published/freeipa/ansible_freeipa/), but shell access to one + node from the realm is required to manage the deployment. +""" diff --git a/plugins/doc_fragments/keycloak.py b/plugins/doc_fragments/keycloak.py index 5d79fad7c0..2ec693eb99 100644 --- a/plugins/doc_fragments/keycloak.py +++ b/plugins/doc_fragments/keycloak.py @@ -1,78 +1,93 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017, Eike Frost # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: - auth_keycloak_url: - description: - - URL to the Keycloak instance. 
- type: str - required: true - aliases: - - url + auth_keycloak_url: + description: + - URL to the Keycloak instance. + type: str + required: true + aliases: + - url - auth_client_id: - description: - - OpenID Connect I(client_id) to authenticate to the API with. - type: str - default: admin-cli + auth_client_id: + description: + - OpenID Connect C(client_id) to authenticate to the API with. + type: str + default: admin-cli - auth_realm: - description: - - Keycloak realm name to authenticate to for API access. - type: str + auth_realm: + description: + - Keycloak realm name to authenticate to for API access. + type: str - auth_client_secret: - description: - - Client Secret to use in conjunction with I(auth_client_id) (if required). - type: str + auth_client_secret: + description: + - Client Secret to use in conjunction with O(auth_client_id) (if required). + type: str - auth_username: - description: - - Username to authenticate for API access with. - type: str - aliases: - - username + auth_username: + description: + - Username to authenticate for API access with. + type: str + aliases: + - username - auth_password: - description: - - Password to authenticate for API access with. - type: str - aliases: - - password + auth_password: + description: + - Password to authenticate for API access with. + type: str + aliases: + - password - token: - description: - - Authentication token for Keycloak API. - type: str - version_added: 3.0.0 + token: + description: + - Authentication token for Keycloak API. + type: str + version_added: 3.0.0 - validate_certs: - description: - - Verify TLS certificates (do not disable this in production). - type: bool - default: true + refresh_token: + description: + - Authentication refresh token for Keycloak API. + type: str + version_added: 10.3.0 - connection_timeout: - description: - - Controls the HTTP connections timeout period (in seconds) to Keycloak API. 
- type: int - default: 10 - version_added: 4.5.0 - http_agent: - description: - - Configures the HTTP User-Agent header. - type: str - default: Ansible - version_added: 5.4.0 -''' + validate_certs: + description: + - Verify TLS certificates (do not disable this in production). + type: bool + default: true + + connection_timeout: + description: + - Controls the HTTP connections timeout period (in seconds) to Keycloak API. + type: int + default: 10 + version_added: 4.5.0 + + http_agent: + description: + - Configures the HTTP User-Agent header. + type: str + default: Ansible + version_added: 5.4.0 +""" + + ACTIONGROUP_KEYCLOAK = r""" +options: {} +attributes: + action_group: + description: Use C(group/community.general.keycloak) in C(module_defaults) to set defaults for this module. + support: full + membership: + - community.general.keycloak +""" diff --git a/plugins/doc_fragments/ldap.py b/plugins/doc_fragments/ldap.py index 1f04c0f600..d787bfd65d 100644 --- a/plugins/doc_fragments/ldap.py +++ b/plugins/doc_fragments/ldap.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2016, Peter Sagerson # Copyright (c) 2016, Jiri Tyr @@ -6,24 +5,45 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard LDAP documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" +notes: + - The default authentication settings attempts to use a SASL EXTERNAL bind over a UNIX domain socket. This works well with + the default Ubuntu install for example, which includes a C(cn=peercred,cn=external,cn=auth) ACL rule allowing root to + modify the server configuration. If you need to use a simple bind to access your server, pass the credentials in O(bind_dn) + and O(bind_pw). 
options: bind_dn: description: - - A DN to bind with. If this is omitted, we'll try a SASL bind with the EXTERNAL mechanism as default. - - If this is blank, we'll use an anonymous bind. + - A DN to bind with. Try to use a SASL bind with the EXTERNAL mechanism as default when this parameter is omitted. + - Use an anonymous bind if the parameter is blank. type: str bind_pw: description: - - The password to use with I(bind_dn). + - The password to use with O(bind_dn). type: str default: '' + ca_path: + description: + - Set the path to PEM file with CA certs. + type: path + version_added: "6.5.0" + client_cert: + type: path + description: + - PEM formatted certificate chain file to be used for SSL client authentication. + - Required if O(client_key) is defined. + version_added: "7.1.0" + client_key: + type: path + description: + - PEM formatted file that contains your private key to be used for SSL client authentication. + - Required if O(client_cert) is defined. + version_added: "7.1.0" dn: required: true description: @@ -35,12 +55,13 @@ options: type: str description: - Set the referrals chasing behavior. - - C(anonymous) follow referrals anonymously. This is the default behavior. - - C(disabled) disable referrals chasing. This sets C(OPT_REFERRALS) to off. + - V(anonymous) follow referrals anonymously. This is the default behavior. + - V(disabled) disable referrals chasing. This sets C(OPT_REFERRALS) to off. version_added: 2.0.0 server_uri: description: - - The I(server_uri) parameter may be a comma- or whitespace-separated list of URIs containing only the schema, the host, and the port fields. + - The O(server_uri) parameter may be a comma- or whitespace-separated list of URIs containing only the schema, the host, + and the port fields. - The default value lets the underlying LDAP client library look for a UNIX domain socket in its default location. - Note that when using multiple URIs you cannot determine to which URI your client gets connected. 
- For URIs containing additional fields, particularly when using commas, behavior is undefined. @@ -48,21 +69,30 @@ options: default: ldapi:/// start_tls: description: - - If true, we'll use the START_TLS LDAP extension. + - Use the START_TLS LDAP extension if set to V(true). type: bool default: false validate_certs: description: - - If set to C(false), SSL certificates will not be validated. + - If set to V(false), SSL certificates are not validated. - This should only be used on sites using self-signed certificates. type: bool default: true sasl_class: description: - The class to use for SASL authentication. - - Possible choices are C(external), C(gssapi). type: str choices: ['external', 'gssapi'] default: external version_added: "2.0.0" -''' + xorder_discovery: + description: + - Set the behavior on how to process Xordered DNs. + - V(enable) performs a C(ONELEVEL) search below the superior RDN to find the matching DN. + - V(disable) always uses the DN unmodified (as passed by the O(dn) parameter). + - V(auto) only performs a search if the first RDN does not contain an index number (C({x})). + type: str + choices: ['enable', 'auto', 'disable'] + default: auto + version_added: "6.4.0" +""" diff --git a/plugins/doc_fragments/lxca_common.py b/plugins/doc_fragments/lxca_common.py index b5e7d72948..72bc3b7054 100644 --- a/plugins/doc_fragments/lxca_common.py +++ b/plugins/doc_fragments/lxca_common.py @@ -1,16 +1,14 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2017 Lenovo, Inc. 
# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard Pylxca documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" author: - Naval Patel (@navalkp) - Prashant Bhosale (@prabhosa) @@ -18,19 +16,19 @@ author: options: login_user: description: - - The username for use in HTTP basic authentication. + - The username for use in HTTP basic authentication. type: str required: true login_password: description: - - The password for use in HTTP basic authentication. + - The password for use in HTTP basic authentication. type: str required: true auth_url: description: - - lxca https full web address + - Lxca HTTPS full web address. type: str required: true @@ -38,7 +36,6 @@ requirements: - pylxca notes: - - Additional detail about pylxca can be found at U(https://github.com/lenovo/pylxca) - - Playbooks using these modules can be found at U(https://github.com/lenovo/ansible.lenovo-lxca) - - Check mode is not supported. -''' + - Additional detail about pylxca can be found at U(https://github.com/lenovo/pylxca). + - Playbooks using these modules can be found at U(https://github.com/lenovo/ansible.lenovo-lxca). 
+""" diff --git a/plugins/doc_fragments/manageiq.py b/plugins/doc_fragments/manageiq.py index 030d682385..e7351e4f5e 100644 --- a/plugins/doc_fragments/manageiq.py +++ b/plugins/doc_fragments/manageiq.py @@ -1,17 +1,15 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017, Daniel Korn # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard ManageIQ documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: manageiq_connection: description: @@ -21,33 +19,34 @@ options: suboptions: url: description: - - ManageIQ environment url. C(MIQ_URL) env var if set. otherwise, it is required to pass it. + - ManageIQ environment URL. E(MIQ_URL) environment variable if set. Otherwise, it is required to pass it. type: str required: false username: description: - - ManageIQ username. C(MIQ_USERNAME) env var if set. otherwise, required if no token is passed in. + - ManageIQ username. E(MIQ_USERNAME) environment variable if set. Otherwise, required if no token is passed in. type: str password: description: - - ManageIQ password. C(MIQ_PASSWORD) env var if set. otherwise, required if no token is passed in. + - ManageIQ password. E(MIQ_PASSWORD) environment variable if set. Otherwise, required if no token is passed in. type: str token: description: - - ManageIQ token. C(MIQ_TOKEN) env var if set. otherwise, required if no username or password is passed in. + - ManageIQ token. E(MIQ_TOKEN) environment variable if set. Otherwise, required if no username or password is passed + in. type: str validate_certs: description: - - Whether SSL certificates should be verified for HTTPS requests. defaults to True. + - Whether SSL certificates should be verified for HTTPS requests. 
type: bool default: true - aliases: [ verify_ssl ] + aliases: [verify_ssl] ca_cert: description: - - The path to a CA bundle file or directory with certificates. defaults to None. + - The path to a CA bundle file or directory with certificates. type: str - aliases: [ ca_bundle_path ] + aliases: [ca_bundle_path] requirements: - 'manageiq-client U(https://github.com/ManageIQ/manageiq-api-client-python/)' -''' +""" diff --git a/plugins/doc_fragments/nomad.py b/plugins/doc_fragments/nomad.py index b19404e830..37485ef9a7 100644 --- a/plugins/doc_fragments/nomad.py +++ b/plugins/doc_fragments/nomad.py @@ -1,52 +1,56 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2020 FERREIRA Christophe # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard files documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: - host: - description: - - FQDN of Nomad server. - required: true - type: str - use_ssl: - description: - - Use TLS/SSL connection. - type: bool - default: true - timeout: - description: - - Timeout (in seconds) for the request to Nomad. - type: int - default: 5 - validate_certs: - description: - - Enable TLS/SSL certificate validation. - type: bool - default: true - client_cert: - description: - - Path of certificate for TLS/SSL. - type: path - client_key: - description: - - Path of certificate's private key for TLS/SSL. - type: path - namespace: - description: - - Namespace for Nomad. - type: str - token: - description: - - ACL token for authentification. - type: str -''' + host: + description: + - FQDN of Nomad server. + required: true + type: str + port: + description: + - Port of Nomad server. 
+ type: int + default: 4646 + version_added: 8.0.0 + use_ssl: + description: + - Use TLS/SSL connection. + type: bool + default: true + timeout: + description: + - Timeout (in seconds) for the request to Nomad. + type: int + default: 5 + validate_certs: + description: + - Enable TLS/SSL certificate validation. + type: bool + default: true + client_cert: + description: + - Path of certificate for TLS/SSL. + type: path + client_key: + description: + - Path of certificate's private key for TLS/SSL. + type: path + namespace: + description: + - Namespace for Nomad. + type: str + token: + description: + - ACL token for authentication. + type: str +""" diff --git a/plugins/doc_fragments/onepassword.py b/plugins/doc_fragments/onepassword.py new file mode 100644 index 0000000000..7a2c7566c3 --- /dev/null +++ b/plugins/doc_fragments/onepassword.py @@ -0,0 +1,77 @@ + +# Copyright (c) 2023, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +class ModuleDocFragment(object): + DOCUMENTATION = r""" +requirements: + - See U(https://support.1password.com/command-line/) +options: + master_password: + description: The password used to unlock the specified vault. + aliases: ['vault_password'] + type: str + section: + description: Item section containing the field to retrieve (case-insensitive). If absent, returns first match from any + section. + domain: + description: Domain of 1Password. + default: '1password.com' + type: str + subdomain: + description: The 1Password subdomain to authenticate against. + type: str + account_id: + description: The account ID to target. + type: str + username: + description: The username used to sign in. + type: str + secret_key: + description: The secret key used when performing an initial sign in. 
+ type: str + service_account_token: + description: + - The access key for a service account. + - Only works with 1Password CLI version 2 or later. + type: str + vault: + description: Vault containing the item to retrieve (case-insensitive). If absent, searches all vaults. + type: str + connect_host: + description: The host for 1Password Connect. Must be used in combination with O(connect_token). + type: str + env: + - name: OP_CONNECT_HOST + version_added: 8.1.0 + connect_token: + description: The token for 1Password Connect. Must be used in combination with O(connect_host). + type: str + env: + - name: OP_CONNECT_TOKEN + version_added: 8.1.0 +""" + + LOOKUP = r""" +options: + service_account_token: + env: + - name: OP_SERVICE_ACCOUNT_TOKEN + version_added: 8.2.0 +notes: + - This lookup uses an existing 1Password session if one exists. If not, and you have already performed an initial sign in + (meaning C(~/.op/config), C(~/.config/op/config) or C(~/.config/.op/config) exists), then only the O(master_password) + is required. You may optionally specify O(subdomain) in this scenario, otherwise the last used subdomain is used by C(op). + - This lookup can perform an initial login by providing O(subdomain), O(username), O(secret_key), and O(master_password). + - Can target a specific account by providing the O(account_id). + - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal + credentials needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or + greater in strength to the 1Password master password. + - This lookup stores potentially sensitive data from 1Password as Ansible facts. Facts are subject to caching if enabled, + which means this data could be stored in clear text on disk or in a database. + - Tested with C(op) version 2.7.2. 
+""" diff --git a/plugins/doc_fragments/oneview.py b/plugins/doc_fragments/oneview.py index 54288e51f6..9e64f02e1a 100644 --- a/plugins/doc_fragments/oneview.py +++ b/plugins/doc_fragments/oneview.py @@ -1,80 +1,75 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2016-2017, Hewlett Packard Enterprise Development LP # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # OneView doc fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: - config: - description: - - Path to a .json configuration file containing the OneView client configuration. - The configuration file is optional and when used should be present in the host running the ansible commands. - If the file path is not provided, the configuration will be loaded from environment variables. - For links to example configuration files or how to use the environment variables verify the notes section. - type: path - api_version: - description: - - OneView API Version. - type: int - image_streamer_hostname: - description: - - IP address or hostname for the HPE Image Streamer REST API. - type: str - hostname: - description: - - IP address or hostname for the appliance. - type: str - username: - description: - - Username for API authentication. - type: str - password: - description: - - Password for API authentication. - type: str + config: + description: + - Path to a JSON configuration file containing the OneView client configuration. The configuration file is optional + and when used should be present in the host running the ansible commands. If the file path is not provided, the configuration + is loaded from environment variables. For links to example configuration files or how to use the environment variables + verify the notes section. 
+ type: path + api_version: + description: + - OneView API Version. + type: int + image_streamer_hostname: + description: + - IP address or hostname for the HPE Image Streamer REST API. + type: str + hostname: + description: + - IP address or hostname for the appliance. + type: str + username: + description: + - Username for API authentication. + type: str + password: + description: + - Password for API authentication. + type: str requirements: - - python >= 2.7.9 + - Python >= 2.7.9 notes: - - "A sample configuration file for the config parameter can be found at: - U(https://github.com/HewlettPackard/oneview-ansible/blob/master/examples/oneview_config-rename.json)" - - "Check how to use environment variables for configuration at: - U(https://github.com/HewlettPackard/oneview-ansible#environment-variables)" - - "Additional Playbooks for the HPE OneView Ansible modules can be found at: - U(https://github.com/HewlettPackard/oneview-ansible/tree/master/examples)" - - "The OneView API version used will directly affect returned and expected fields in resources. - Information on setting the desired API version and can be found at: - U(https://github.com/HewlettPackard/oneview-ansible#setting-your-oneview-version)" - ''' + - 'A sample configuration file for the config parameter can be found at: + U(https://github.com/HewlettPackard/oneview-ansible/blob/master/examples/oneview_config-rename.json).' + - 'Check how to use environment variables for configuration at: U(https://github.com/HewlettPackard/oneview-ansible#environment-variables).' + - 'Additional Playbooks for the HPE OneView Ansible modules can be found at: U(https://github.com/HewlettPackard/oneview-ansible/tree/master/examples).' + - 'The OneView API version used directly affects returned and expected fields in resources. Information on setting the desired + API version and can be found at: U(https://github.com/HewlettPackard/oneview-ansible#setting-your-oneview-version).' 
+""" - VALIDATEETAG = r''' + VALIDATEETAG = r""" options: - validate_etag: - description: - - When the ETag Validation is enabled, the request will be conditionally processed only if the current ETag - for the resource matches the ETag provided in the data. - type: bool - default: true -''' + validate_etag: + description: + - When the ETag Validation is enabled, the request is conditionally processed only if the current ETag for the resource + matches the ETag provided in the data. + type: bool + default: true +""" - FACTSPARAMS = r''' + FACTSPARAMS = r""" options: - params: - description: - - List of params to delimit, filter and sort the list of resources. - - "params allowed: - - C(start): The first item to return, using 0-based indexing. - - C(count): The number of resources to return. - - C(filter): A general filter/query string to narrow the list of items returned. - - C(sort): The sort order of the returned data set." - type: dict -''' + params: + description: + - List of parameters to delimit, filter and sort the list of resources. + - 'Parameter keys allowed are:' + - 'V(start): The first item to return, using 0-based indexing.' + - 'V(count): The number of resources to return.' + - 'V(filter): A general filter/query string to narrow the list of items returned.' + - 'V(sort): The sort order of the returned data set.' 
+ type: dict +""" diff --git a/plugins/doc_fragments/online.py b/plugins/doc_fragments/online.py index d7e13765b0..c2b130e7a0 100644 --- a/plugins/doc_fragments/online.py +++ b/plugins/doc_fragments/online.py @@ -1,45 +1,41 @@ -# -*- coding: utf-8 -*- # Copyright (c) Ansible project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: api_token: description: - Online OAuth token. type: str required: true - aliases: [ oauth_token ] + aliases: [oauth_token] api_url: description: - - Online API URL + - Online API URL. type: str default: 'https://api.online.net' - aliases: [ base_url ] + aliases: [base_url] api_timeout: description: - HTTP timeout to Online API in seconds. type: int default: 30 - aliases: [ timeout ] + aliases: [timeout] validate_certs: description: - Validate SSL certs of the Online API. type: bool default: true notes: - - Also see the API documentation on U(https://console.online.net/en/api/) - - If C(api_token) is not set within the module, the following - environment variables can be used in decreasing order of precedence - C(ONLINE_TOKEN), C(ONLINE_API_KEY), C(ONLINE_OAUTH_TOKEN), C(ONLINE_API_TOKEN) - - If one wants to use a different C(api_url) one can also set the C(ONLINE_API_URL) - environment variable. -''' + - Also see the API documentation on U(https://console.online.net/en/api/). + - If O(api_token) is not set within the module, the following environment variables can be used in decreasing order of precedence + E(ONLINE_TOKEN), E(ONLINE_API_KEY), E(ONLINE_OAUTH_TOKEN), E(ONLINE_API_TOKEN). 
+ - If one wants to use a different O(api_url) one can also set the E(ONLINE_API_URL) environment variable. +""" diff --git a/plugins/doc_fragments/opennebula.py b/plugins/doc_fragments/opennebula.py index 0fc323271a..72ccf7d70d 100644 --- a/plugins/doc_fragments/opennebula.py +++ b/plugins/doc_fragments/opennebula.py @@ -1,45 +1,43 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, www.privaz.io Valletech AB # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # OpenNebula common documentation - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: - api_url: - description: - - The ENDPOINT URL of the XMLRPC server. - - If not specified then the value of the ONE_URL environment variable, if any, is used. - type: str - aliases: - - api_endpoint - api_username: - description: - - The name of the user for XMLRPC authentication. - - If not specified then the value of the ONE_USERNAME environment variable, if any, is used. - type: str - api_password: - description: - - The password or token for XMLRPC authentication. - - If not specified then the value of the ONE_PASSWORD environment variable, if any, is used. - type: str - aliases: - - api_token - validate_certs: - description: - - Whether to validate the SSL certificates or not. - - This parameter is ignored if PYTHONHTTPSVERIFY environment variable is used. - type: bool - default: true - wait_timeout: - description: - - Time to wait for the desired state to be reached before timeout, in seconds. - type: int - default: 300 -''' + api_url: + description: + - The ENDPOINT URL of the XMLRPC server. + - If not specified then the value of the E(ONE_URL) environment variable, if any, is used. 
+ type: str + aliases: + - api_endpoint + api_username: + description: + - The name of the user for XMLRPC authentication. + - If not specified then the value of the E(ONE_USERNAME) environment variable, if any, is used. + type: str + api_password: + description: + - The password or token for XMLRPC authentication. + - If not specified then the value of the E(ONE_PASSWORD) environment variable, if any, is used. + type: str + aliases: + - api_token + validate_certs: + description: + - Whether to validate the TLS/SSL certificates or not. + - This parameter is ignored if E(PYTHONHTTPSVERIFY) environment variable is used. + type: bool + default: true + wait_timeout: + description: + - Time to wait for the desired state to be reached before timeout, in seconds. + type: int + default: 300 +""" diff --git a/plugins/doc_fragments/openswitch.py b/plugins/doc_fragments/openswitch.py index 9d5f0be742..aac90e020f 100644 --- a/plugins/doc_fragments/openswitch.py +++ b/plugins/doc_fragments/openswitch.py @@ -1,85 +1,69 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2015, Peter Sprygada # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard files documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: host: description: - - Specifies the DNS host name or address for connecting to the remote - device over the specified transport. The value of host is used as - the destination address for the transport. Note this argument - does not affect the SSH argument. + - Specifies the DNS host name or address for connecting to the remote device over the specified transport. The value + of host is used as the destination address for the transport. Note this argument does not affect the SSH argument. 
type: str port: description: - - Specifies the port to use when building the connection to the remote - device. This value applies to either I(cli) or I(rest). The port - value will default to the appropriate transport common port if - none is provided in the task. (cli=22, http=80, https=443). Note - this argument does not affect the SSH transport. + - Specifies the port to use when building the connection to the remote device. This value applies to either O(transport=cli) + or O(transport=rest). The port value defaults to the appropriate transport common port if none is provided in the + task. (cli=22, http=80, https=443). Note this argument does not affect the SSH transport. type: int default: 0 (use common port) username: description: - - Configures the username to use to authenticate the connection to - the remote device. This value is used to authenticate - either the CLI login or the eAPI authentication depending on which - transport is used. Note this argument does not affect the SSH - transport. If the value is not specified in the task, the value of - environment variable C(ANSIBLE_NET_USERNAME) will be used instead. + - Configures the username to use to authenticate the connection to the remote device. This value is used to authenticate + either the CLI login or the eAPI authentication depending on which transport is used. Note this argument does not + affect the SSH transport. If the value is not specified in the task, the value of environment variable E(ANSIBLE_NET_USERNAME) + is used instead. type: str password: description: - - Specifies the password to use to authenticate the connection to - the remote device. This is a common argument used for either I(cli) - or I(rest) transports. Note this argument does not affect the SSH - transport. If the value is not specified in the task, the value of - environment variable C(ANSIBLE_NET_PASSWORD) will be used instead. + - Specifies the password to use to authenticate the connection to the remote device. 
This is a common argument used + for either O(transport=cli) or O(transport=rest). Note this argument does not affect the SSH transport. If the value + is not specified in the task, the value of environment variable E(ANSIBLE_NET_PASSWORD) is used instead. type: str timeout: description: - - Specifies the timeout in seconds for communicating with the network device - for either connecting or sending commands. If the timeout is - exceeded before the operation is completed, the module will error. + - Specifies the timeout in seconds for communicating with the network device for either connecting or sending commands. + If the timeout is exceeded before the operation is completed, the module fails. type: int default: 10 ssh_keyfile: description: - - Specifies the SSH key to use to authenticate the connection to - the remote device. This argument is only used for the I(cli) - transports. If the value is not specified in the task, the value of - environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead. + - Specifies the SSH key to use to authenticate the connection to the remote device. This argument is only used for O(transport=cli). + If the value is not specified in the task, the value of environment variable E(ANSIBLE_NET_SSH_KEYFILE) is used instead. type: path transport: description: - - Configures the transport connection to use when connecting to the - remote device. The transport argument supports connectivity to the - device over ssh, cli or REST. + - Configures the transport connection to use when connecting to the remote device. The transport argument supports connectivity + to the device over SSH (V(ssh)), CLI (V(cli)), or REST (V(rest)). required: true type: str - choices: [ cli, rest, ssh ] + choices: [cli, rest, ssh] default: ssh use_ssl: description: - - Configures the I(transport) to use SSL if set to C(true) only when the - I(transport) argument is configured as rest. If the transport - argument is not I(rest), this value is ignored. 
+ - Configures the O(transport) to use SSL if set to V(true) only when the O(transport) argument is configured as rest. + If the transport argument is not V(rest), this value is ignored. type: bool default: true provider: description: - - Convenience method that allows all I(openswitch) arguments to be passed as - a dict object. All constraints (required, choices, etc) must be - met either by individual arguments or values in this dict. + - Convenience method that allows all C(openswitch) arguments to be passed as a dict object. All constraints (required, + choices, and so on) must be met either by individual arguments or values in this dict. type: dict -''' +""" diff --git a/plugins/doc_fragments/oracle.py b/plugins/doc_fragments/oracle.py index 9ca4706baa..05120f7aa3 100644 --- a/plugins/doc_fragments/oracle.py +++ b/plugins/doc_fragments/oracle.py @@ -1,84 +1,80 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations + +# +# DEPRECATED +# +# This fragment is deprecated and will be removed in community.general 13.0.0 +# class ModuleDocFragment(object): - DOCUMENTATION = """ - requirements: - - "python >= 2.7" - - Python SDK for Oracle Cloud Infrastructure U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io) - notes: - - For OCI python sdk configuration, please refer to - U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/configuration.html) - options: - config_file_location: - description: - - Path to configuration file. If not set then the value of the OCI_CONFIG_FILE environment variable, - if any, is used. Otherwise, defaults to ~/.oci/config. 
- type: str - config_profile_name: - description: - - The profile to load from the config file referenced by C(config_file_location). If not set, then the - value of the OCI_CONFIG_PROFILE environment variable, if any, is used. Otherwise, defaults to the - "DEFAULT" profile in C(config_file_location). - default: "DEFAULT" - type: str - api_user: - description: - - The OCID of the user, on whose behalf, OCI APIs are invoked. If not set, then the - value of the OCI_USER_OCID environment variable, if any, is used. This option is required if the user - is not specified through a configuration file (See C(config_file_location)). To get the user's OCID, - please refer U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm). - type: str - api_user_fingerprint: - description: - - Fingerprint for the key pair being used. If not set, then the value of the OCI_USER_FINGERPRINT - environment variable, if any, is used. This option is required if the key fingerprint is not - specified through a configuration file (See C(config_file_location)). To get the key pair's - fingerprint value please refer - U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm). - type: str - api_user_key_file: - description: - - Full path and filename of the private key (in PEM format). If not set, then the value of the - OCI_USER_KEY_FILE variable, if any, is used. This option is required if the private key is - not specified through a configuration file (See C(config_file_location)). If the key is encrypted - with a pass-phrase, the C(api_user_key_pass_phrase) option must also be provided. - type: path - api_user_key_pass_phrase: - description: - - Passphrase used by the key referenced in C(api_user_key_file), if it is encrypted. If not set, then - the value of the OCI_USER_KEY_PASS_PHRASE variable, if any, is used. This option is required if the - key passphrase is not specified through a configuration file (See C(config_file_location)). 
- type: str - auth_type: - description: - - The type of authentication to use for making API requests. By default C(auth_type="api_key") based - authentication is performed and the API key (see I(api_user_key_file)) in your config file will be - used. If this 'auth_type' module option is not specified, the value of the OCI_ANSIBLE_AUTH_TYPE, - if any, is used. Use C(auth_type="instance_principal") to use instance principal based authentication - when running ansible playbooks within an OCI compute instance. - choices: ['api_key', 'instance_principal'] - default: 'api_key' - type: str - tenancy: - description: - - OCID of your tenancy. If not set, then the value of the OCI_TENANCY variable, if any, is - used. This option is required if the tenancy OCID is not specified through a configuration file - (See C(config_file_location)). To get the tenancy OCID, please refer - U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm) - type: str - region: - description: - - The Oracle Cloud Infrastructure region to use for all OCI API requests. If not set, then the - value of the OCI_REGION variable, if any, is used. This option is required if the region is - not specified through a configuration file (See C(config_file_location)). Please refer to - U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/regions.htm) for more information - on OCI regions. - type: str - """ + DOCUMENTATION = r""" +requirements: + - Python SDK for Oracle Cloud Infrastructure U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io) +notes: + - For OCI Python SDK configuration, please refer to U(https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/configuration.html). +options: + config_file_location: + description: + - Path to configuration file. If not set then the value of the E(OCI_CONFIG_FILE) environment variable, if any, is used. + Otherwise, defaults to C(~/.oci/config). 
+ type: str + config_profile_name: + description: + - The profile to load from the config file referenced by O(config_file_location). If not set, then the value of the + E(OCI_CONFIG_PROFILE) environment variable, if any, is used. Otherwise, defaults to the C(DEFAULT) profile in O(config_file_location). + default: "DEFAULT" + type: str + api_user: + description: + - The OCID of the user, on whose behalf, OCI APIs are invoked. If not set, then the value of the E(OCI_USER_OCID) environment + variable, if any, is used. This option is required if the user is not specified through a configuration file (See + O(config_file_location)). To get the user's OCID, please refer U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm). + type: str + api_user_fingerprint: + description: + - Fingerprint for the key pair being used. If not set, then the value of the E(OCI_USER_FINGERPRINT) environment variable, + if any, is used. This option is required if the key fingerprint is not specified through a configuration file (See + O(config_file_location)). To get the key pair's fingerprint value please refer to + U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm). + type: str + api_user_key_file: + description: + - Full path and filename of the private key (in PEM format). If not set, then the value of the E(OCI_USER_KEY_FILE) + variable, if any, is used. This option is required if the private key is not specified through a configuration file + (See O(config_file_location)). If the key is encrypted with a pass-phrase, the O(api_user_key_pass_phrase) option + must also be provided. + type: path + api_user_key_pass_phrase: + description: + - Passphrase used by the key referenced in O(api_user_key_file), if it is encrypted. If not set, then the value of the + E(OCI_USER_KEY_PASS_PHRASE) variable, if any, is used. 
This option is required if the key passphrase is not specified + through a configuration file (See O(config_file_location)). + type: str + auth_type: + description: + - The type of authentication to use for making API requests. By default O(auth_type=api_key) based authentication is + performed and the API key (see O(api_user_key_file)) in your config file is used. If O(auth_type) is not specified, + the value of the E(OCI_ANSIBLE_AUTH_TYPE), if any, is used. Use O(auth_type=instance_principal) to use instance principal + based authentication when running ansible playbooks within an OCI compute instance. + choices: ['api_key', 'instance_principal'] + default: 'api_key' + type: str + tenancy: + description: + - OCID of your tenancy. If not set, then the value of the E(OCI_TENANCY) variable, if any, is used. This option is required + if the tenancy OCID is not specified through a configuration file (See O(config_file_location)). To get the tenancy + OCID, please refer to U(https://docs.us-phoenix-1.oraclecloud.com/Content/API/Concepts/apisigningkey.htm). + type: str + region: + description: + - The Oracle Cloud Infrastructure region to use for all OCI API requests. If not set, then the value of the E(OCI_REGION) + variable, if any, is used. This option is required if the region is not specified through a configuration file (See + O(config_file_location)). Please refer to U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/regions.htm) + for more information on OCI regions. + type: str +""" diff --git a/plugins/doc_fragments/oracle_creatable_resource.py b/plugins/doc_fragments/oracle_creatable_resource.py index 5293819199..1728e56d81 100644 --- a/plugins/doc_fragments/oracle_creatable_resource.py +++ b/plugins/doc_fragments/oracle_creatable_resource.py @@ -1,26 +1,29 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. 
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations + +# +# DEPRECATED +# +# This fragment is deprecated and will be removed in community.general 13.0.0 +# class ModuleDocFragment(object): - DOCUMENTATION = """ - options: - force_create: - description: Whether to attempt non-idempotent creation of a resource. By default, create resource is an - idempotent operation, and doesn't create the resource if it already exists. Setting this option - to true, forcefully creates a copy of the resource, even if it already exists.This option is - mutually exclusive with I(key_by). - default: false - type: bool - key_by: - description: The list of comma-separated attributes of this resource which should be used to uniquely - identify an instance of the resource. By default, all the attributes of a resource except - I(freeform_tags) are used to uniquely identify a resource. - type: list - elements: str - """ + DOCUMENTATION = r""" +options: + force_create: + description: Whether to attempt non-idempotent creation of a resource. By default, create resource is an idempotent operation, + and does not create the resource if it already exists. Setting this option to V(true), forcefully creates a copy of + the resource, even if it already exists. This option is mutually exclusive with O(key_by). + default: false + type: bool + key_by: + description: The list of comma-separated attributes of this resource which should be used to uniquely identify an instance + of the resource. By default, all the attributes of a resource except O(freeform_tags) are used to uniquely identify + a resource. 
+ type: list + elements: str +""" diff --git a/plugins/doc_fragments/oracle_display_name_option.py b/plugins/doc_fragments/oracle_display_name_option.py index eae5f44593..1ac210bbd4 100644 --- a/plugins/doc_fragments/oracle_display_name_option.py +++ b/plugins/doc_fragments/oracle_display_name_option.py @@ -1,17 +1,21 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations + +# +# DEPRECATED +# +# This fragment is deprecated and will be removed in community.general 13.0.0 +# class ModuleDocFragment(object): - DOCUMENTATION = """ - options: - display_name: - description: Use I(display_name) along with the other options to return only resources that match the given - display name exactly. - type: str - """ + DOCUMENTATION = r""" +options: + display_name: + description: Use O(display_name) along with the other options to return only resources that match the given display name + exactly. + type: str +""" diff --git a/plugins/doc_fragments/oracle_name_option.py b/plugins/doc_fragments/oracle_name_option.py index 362071f946..a281bc5e68 100644 --- a/plugins/doc_fragments/oracle_name_option.py +++ b/plugins/doc_fragments/oracle_name_option.py @@ -1,17 +1,20 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. 
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations + +# +# DEPRECATED +# +# This fragment is deprecated and will be removed in community.general 13.0.0 +# class ModuleDocFragment(object): - DOCUMENTATION = """ - options: - name: - description: Use I(name) along with the other options to return only resources that match the given name - exactly. - type: str - """ + DOCUMENTATION = r""" +options: + name: + description: Use O(name) along with the other options to return only resources that match the given name exactly. + type: str +""" diff --git a/plugins/doc_fragments/oracle_tags.py b/plugins/doc_fragments/oracle_tags.py index 3789dbe912..ec0096ba33 100644 --- a/plugins/doc_fragments/oracle_tags.py +++ b/plugins/doc_fragments/oracle_tags.py @@ -1,23 +1,25 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations + +# +# DEPRECATED +# +# This fragment is deprecated and will be removed in community.general 13.0.0 +# class ModuleDocFragment(object): - DOCUMENTATION = """ - options: - defined_tags: - description: Defined tags for this resource. Each key is predefined and scoped to a namespace. For more - information, see - U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm). - type: dict - freeform_tags: - description: Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, - type, or namespace. 
For more information, see - U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm). - type: dict - """ + DOCUMENTATION = r""" +options: + defined_tags: + description: Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see + U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm). + type: dict + freeform_tags: + description: Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. + For more information, see U(https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm). + type: dict +""" diff --git a/plugins/doc_fragments/oracle_wait_options.py b/plugins/doc_fragments/oracle_wait_options.py index ce7ea776e2..868fb3cb04 100644 --- a/plugins/doc_fragments/oracle_wait_options.py +++ b/plugins/doc_fragments/oracle_wait_options.py @@ -1,27 +1,30 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Oracle and/or its affiliates. # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations + +# +# DEPRECATED +# +# This fragment is deprecated and will be removed in community.general 13.0.0 +# class ModuleDocFragment(object): - DOCUMENTATION = """ - options: - wait: - description: Whether to wait for create or delete operation to complete. - default: true - type: bool - wait_timeout: - description: Time, in seconds, to wait when I(wait=true). - default: 1200 - type: int - wait_until: - description: The lifecycle state to wait for the resource to transition into when I(wait=true). 
By default, - when I(wait=true), we wait for the resource to get into ACTIVE/ATTACHED/AVAILABLE/PROVISIONED/ - RUNNING applicable lifecycle state during create operation & to get into DELETED/DETACHED/ - TERMINATED lifecycle state during delete operation. - type: str - """ + DOCUMENTATION = r""" +options: + wait: + description: Whether to wait for create or delete operation to complete. + default: true + type: bool + wait_timeout: + description: Time, in seconds, to wait when O(wait=true). + default: 1200 + type: int + wait_until: + description: The lifecycle state to wait for the resource to transition into when O(wait=true). By default, when O(wait=true), + we wait for the resource to get into ACTIVE/ATTACHED/AVAILABLE/PROVISIONED/ RUNNING applicable lifecycle state during + create operation and to get into DELETED/DETACHED/ TERMINATED lifecycle state during delete operation. + type: str +""" diff --git a/plugins/doc_fragments/pipx.py b/plugins/doc_fragments/pipx.py new file mode 100644 index 0000000000..70a502ddda --- /dev/null +++ b/plugins/doc_fragments/pipx.py @@ -0,0 +1,40 @@ + +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +class ModuleDocFragment(object): + DOCUMENTATION = r""" +options: + global: + description: + - The module passes the C(--global) argument to C(pipx), to execute actions in global scope. + type: bool + default: false + executable: + description: + - Path to the C(pipx) installed in the system. + - If not specified, the module uses C(python -m pipx) to run the tool, using the same Python interpreter as ansible + itself. + type: path +requirements: + - This module requires C(pipx) version 1.7.0 or above. + - Please note that C(pipx) 1.7.0 requires Python 3.8 or above. + - Please note that C(pipx) 1.8.0 requires Python 3.9 or above. 
+notes: + - This module does not install the C(pipx) python package, however that can be easily done with the module M(ansible.builtin.pip). + - This module does not require C(pipx) to be in the shell C(PATH), but it must be loadable by Python as a module, meaning + that C(python -m pipx) must work. + - This module honors C(pipx) environment variables such as but not limited to E(PIPX_HOME) and E(PIPX_BIN_DIR) passed using + the R(environment Ansible keyword, playbooks_environment). + - This module disabled emojis in the output of C(pipx) commands to reduce clutter. In C(pipx) 1.8.0, the environment variable + E(USE_EMOJI) was renamed to E(PIPX_USE_EMOJI) and for compatibility with both versions, starting in community.general + 11.4.0, this module sets them both to C(0) to disable emojis. +seealso: + - name: C(pipx) command manual page + description: Manual page for the command. + link: https://pipx.pypa.io/latest/docs/ +""" diff --git a/plugins/doc_fragments/pritunl.py b/plugins/doc_fragments/pritunl.py index 51ab979b54..17e03fc716 100644 --- a/plugins/doc_fragments/pritunl.py +++ b/plugins/doc_fragments/pritunl.py @@ -1,44 +1,37 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2021, Florian Dambrine # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function - -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): DOCUMENTATION = r""" options: - pritunl_url: - type: str - required: true - description: - - URL and port of the Pritunl server on which the API is enabled. - - pritunl_api_token: - type: str - required: true - description: - - API Token of a Pritunl admin user. - - It needs to be enabled in Administrators > USERNAME > Enable Token Authentication. 
- - pritunl_api_secret: - type: str - required: true - description: - - API Secret found in Administrators > USERNAME > API Secret. - - validate_certs: - type: bool - required: false - default: true - description: - - If certificates should be validated or not. - - This should never be set to C(false), except if you are very sure that - your connection to the server can not be subject to a Man In The Middle - attack. + pritunl_url: + type: str + required: true + description: + - URL and port of the Pritunl server on which the API is enabled. + pritunl_api_token: + type: str + required: true + description: + - API Token of a Pritunl admin user. + - It needs to be enabled in Administrators > USERNAME > Enable Token Authentication. + pritunl_api_secret: + type: str + required: true + description: + - API Secret found in Administrators > USERNAME > API Secret. + validate_certs: + type: bool + required: false + default: true + description: + - If certificates should be validated or not. + - This should never be set to V(false), except if you are very sure that your connection to the server can not be subject + to a Man In The Middle attack. """ diff --git a/plugins/doc_fragments/proxmox.py b/plugins/doc_fragments/proxmox.py deleted file mode 100644 index e39af4f3a6..0000000000 --- a/plugins/doc_fragments/proxmox.py +++ /dev/null @@ -1,65 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Ansible project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - # Common parameters for Proxmox VE modules - DOCUMENTATION = r''' -options: - api_host: - description: - - Specify the target host of the Proxmox VE cluster. - type: str - required: true - api_user: - description: - - Specify the user to authenticate with. 
- type: str - required: true - api_password: - description: - - Specify the password to authenticate with. - - You can use C(PROXMOX_PASSWORD) environment variable. - type: str - api_token_id: - description: - - Specify the token ID. - type: str - version_added: 1.3.0 - api_token_secret: - description: - - Specify the token secret. - type: str - version_added: 1.3.0 - validate_certs: - description: - - If C(false), SSL certificates will not be validated. - - This should only be used on personally controlled sites using self-signed certificates. - type: bool - default: false -requirements: [ "proxmoxer", "requests" ] -''' - - SELECTION = r''' -options: - vmid: - description: - - Specifies the instance ID. - - If not set the next available ID will be fetched from ProxmoxAPI. - type: int - node: - description: - - Proxmox VE node on which to operate. - - Only required for I(state=present). - - For every other states it will be autodiscovered. - type: str - pool: - description: - - Add the new VM to the specified pool. 
- type: str -''' diff --git a/plugins/doc_fragments/purestorage.py b/plugins/doc_fragments/purestorage.py deleted file mode 100644 index 8db8c3b3da..0000000000 --- a/plugins/doc_fragments/purestorage.py +++ /dev/null @@ -1,63 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (c) 2017, Simon Dodsley -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - - # Standard Pure Storage documentation fragment - DOCUMENTATION = r''' -options: - - See separate platform section for more details -requirements: - - See separate platform section for more details -notes: - - Ansible modules are available for the following Pure Storage products: FlashArray, FlashBlade -''' - - # Documentation fragment for FlashBlade - FB = r''' -options: - fb_url: - description: - - FlashBlade management IP address or Hostname. - type: str - api_token: - description: - - FlashBlade API token for admin privileged user. - type: str -notes: - - This module requires the C(purity_fb) Python library - - You must set C(PUREFB_URL) and C(PUREFB_API) environment variables - if I(fb_url) and I(api_token) arguments are not passed to the module directly -requirements: - - python >= 2.7 - - purity_fb >= 1.1 -''' - - # Documentation fragment for FlashArray - FA = r''' -options: - fa_url: - description: - - FlashArray management IPv4 address or Hostname. - type: str - required: true - api_token: - description: - - FlashArray API token for admin privileged user. 
- type: str - required: true -notes: - - This module requires the C(purestorage) Python library - - You must set C(PUREFA_URL) and C(PUREFA_API) environment variables - if I(fa_url) and I(api_token) arguments are not passed to the module directly -requirements: - - python >= 2.7 - - purestorage -''' diff --git a/plugins/doc_fragments/rackspace.py b/plugins/doc_fragments/rackspace.py deleted file mode 100644 index 9e22316022..0000000000 --- a/plugins/doc_fragments/rackspace.py +++ /dev/null @@ -1,118 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (c) 2014, Matt Martz -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -class ModuleDocFragment(object): - - # Standard Rackspace only documentation fragment - DOCUMENTATION = r''' -options: - api_key: - description: - - Rackspace API key, overrides I(credentials). - type: str - aliases: [ password ] - credentials: - description: - - File to find the Rackspace credentials in. Ignored if I(api_key) and - I(username) are provided. - type: path - aliases: [ creds_file ] - env: - description: - - Environment as configured in I(~/.pyrax.cfg), - see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration). - type: str - region: - description: - - Region to create an instance in. - type: str - username: - description: - - Rackspace username, overrides I(credentials). - type: str - validate_certs: - description: - - Whether or not to require SSL validation of API endpoints. - type: bool - aliases: [ verify_ssl ] -requirements: - - python >= 2.6 - - pyrax -notes: - - The following environment variables can be used, C(RAX_USERNAME), - C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION). 
- - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file - appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating) - - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file - - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) -''' - - # Documentation fragment including attributes to enable communication - # of other OpenStack clouds. Not all rax modules support this. - OPENSTACK = r''' -options: - api_key: - type: str - description: - - Rackspace API key, overrides I(credentials). - aliases: [ password ] - auth_endpoint: - type: str - description: - - The URI of the authentication service. - - If not specified will be set to U(https://identity.api.rackspacecloud.com/v2.0/) - credentials: - type: path - description: - - File to find the Rackspace credentials in. Ignored if I(api_key) and - I(username) are provided. - aliases: [ creds_file ] - env: - type: str - description: - - Environment as configured in I(~/.pyrax.cfg), - see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration). - identity_type: - type: str - description: - - Authentication mechanism to use, such as rackspace or keystone. - default: rackspace - region: - type: str - description: - - Region to create an instance in. - tenant_id: - type: str - description: - - The tenant ID used for authentication. - tenant_name: - type: str - description: - - The tenant name used for authentication. - username: - type: str - description: - - Rackspace username, overrides I(credentials). - validate_certs: - description: - - Whether or not to require SSL validation of API endpoints. - type: bool - aliases: [ verify_ssl ] -requirements: - - python >= 2.6 - - pyrax -notes: - - The following environment variables can be used, C(RAX_USERNAME), - C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION). 
- - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file - appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating) - - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file - - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...) -''' diff --git a/plugins/doc_fragments/redfish.py b/plugins/doc_fragments/redfish.py new file mode 100644 index 0000000000..ed95eeab83 --- /dev/null +++ b/plugins/doc_fragments/redfish.py @@ -0,0 +1,35 @@ + +# Copyright (c) 2025 Ansible community +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +class ModuleDocFragment(object): + + # Use together with the community.general.redfish module utils' REDFISH_COMMON_ARGUMENT_SPEC + DOCUMENTATION = r""" +options: + validate_certs: + description: + - If V(false), TLS/SSL certificates are not validated. + - Set this to V(true) to enable certificate checking. Should be used together with O(ca_path). + type: bool + default: false + ca_path: + description: + - PEM formatted file that contains a CA certificate to be used for validation. + - Only used if O(validate_certs=true). + type: path + ciphers: + required: false + description: + - TLS/SSL Ciphers to use for the request. + - When a list is provided, all ciphers are joined in order with V(:). + - See the L(OpenSSL Cipher List Format,https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT) + for more details. + - The available ciphers is dependent on the Python and OpenSSL/LibreSSL versions. 
+ type: list + elements: str +""" diff --git a/plugins/doc_fragments/redis.py b/plugins/doc_fragments/redis.py index 2d40330519..38889a3cbd 100644 --- a/plugins/doc_fragments/redis.py +++ b/plugins/doc_fragments/redis.py @@ -1,16 +1,14 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2021, Andreas Botzner # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Common parameters for Redis modules - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: login_host: description: @@ -40,19 +38,26 @@ options: validate_certs: description: - Specify whether or not to validate TLS certificates. - - This should only be turned off for personally controlled sites or with - C(localhost) as target. + - This should only be turned off for personally controlled sites or with C(localhost) as target. type: bool default: true ca_certs: description: - - Path to root certificates file. If not set and I(tls) is - set to C(true), certifi ca-certificates will be used. + - Path to root certificates file. If not set and O(tls) is set to V(true), certifi's CA certificates are used. type: str -requirements: [ "redis", "certifi" ] + client_cert_file: + description: + - Path to the client certificate file. + type: str + version_added: 9.3.0 + client_key_file: + description: + - Path to the client private key file. + type: str + version_added: 9.3.0 +requirements: ["redis", "certifi"] notes: - - Requires the C(redis) Python package on the remote host. You can - install it with pip (C(pip install redis)) or with a package manager. - Information on the library can be found at U(https://github.com/andymccurdy/redis-py). -''' + - Requires the C(redis) Python package on the remote host. 
You can install it with pip (C(pip install redis)) or with a + package manager. Information on the library can be found at U(https://github.com/andymccurdy/redis-py). +""" diff --git a/plugins/doc_fragments/rundeck.py b/plugins/doc_fragments/rundeck.py index 62c8648e96..3e9d99aa7a 100644 --- a/plugins/doc_fragments/rundeck.py +++ b/plugins/doc_fragments/rundeck.py @@ -1,17 +1,15 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2021, Phillipe Smith # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard files documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: url: type: str @@ -29,4 +27,4 @@ options: description: - Rundeck User API Token. required: true -''' +""" diff --git a/plugins/doc_fragments/scaleway.py b/plugins/doc_fragments/scaleway.py index b08d11dbb0..7810deb901 100644 --- a/plugins/doc_fragments/scaleway.py +++ b/plugins/doc_fragments/scaleway.py @@ -1,39 +1,37 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Yanis Guenane # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: api_token: description: - Scaleway OAuth token. type: str required: true - aliases: [ oauth_token ] + aliases: [oauth_token] api_url: description: - Scaleway API URL. type: str default: https://api.scaleway.com - aliases: [ base_url ] + aliases: [base_url] api_timeout: description: - HTTP timeout to Scaleway API in seconds. 
type: int default: 30 - aliases: [ timeout ] + aliases: [timeout] query_parameters: description: - - List of parameters passed to the query string. + - List of parameters passed to the query string. type: dict default: {} validate_certs: @@ -42,10 +40,18 @@ options: type: bool default: true notes: - - Also see the API documentation on U(https://developer.scaleway.com/) - - If C(api_token) is not set within the module, the following - environment variables can be used in decreasing order of precedence - C(SCW_TOKEN), C(SCW_API_KEY), C(SCW_OAUTH_TOKEN) or C(SCW_API_TOKEN). - - If one wants to use a different C(api_url) one can also set the C(SCW_API_URL) - environment variable. -''' + - Also see the API documentation on U(https://developer.scaleway.com/). + - If O(api_token) is not set within the module, the following environment variables can be used in decreasing order of precedence + E(SCW_TOKEN), E(SCW_API_KEY), E(SCW_OAUTH_TOKEN) or E(SCW_API_TOKEN). + - If one wants to use a different O(api_url) one can also set the E(SCW_API_URL) environment variable. +""" + + ACTIONGROUP_SCALEWAY = r""" +options: {} +attributes: + action_group: + description: Use C(group/community.general.scaleway) in C(module_defaults) to set defaults for this module. 
+ support: full + membership: + - community.general.scaleway +""" diff --git a/plugins/doc_fragments/scaleway_waitable_resource.py b/plugins/doc_fragments/scaleway_waitable_resource.py index 3ab5c7d6f4..2a14c7571e 100644 --- a/plugins/doc_fragments/scaleway_waitable_resource.py +++ b/plugins/doc_fragments/scaleway_waitable_resource.py @@ -1,33 +1,31 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2022, Guillaume MARTINEZ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Standard documentation fragment - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: wait: description: - - Wait for the resource to reach its desired state before returning. + - Wait for the resource to reach its desired state before returning. type: bool default: true wait_timeout: type: int description: - - Time to wait for the resource to reach the expected state. + - Time to wait for the resource to reach the expected state. required: false default: 300 wait_sleep_time: type: int description: - - Time to wait before every attempt to check the state of the resource. + - Time to wait before every attempt to check the state of the resource. 
required: false default: 3 -''' +""" diff --git a/plugins/doc_fragments/utm.py b/plugins/doc_fragments/utm.py index 73ad805035..831f4ccc96 100644 --- a/plugins/doc_fragments/utm.py +++ b/plugins/doc_fragments/utm.py @@ -1,56 +1,55 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Johannes Brunswicker # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: - headers: - description: - - A dictionary of additional headers to be sent to POST and PUT requests. - - Is needed for some modules - type: dict - required: false - default: {} - utm_host: - description: - - The REST Endpoint of the Sophos UTM. - type: str - required: true - utm_port: - description: - - The port of the REST interface. - type: int - default: 4444 - utm_token: - description: - - "The token used to identify at the REST-API. See U(https://www.sophos.com/en-us/medialibrary/\ - PDFs/documentation/UTMonAWS/Sophos-UTM-RESTful-API.pdf?la=en), Chapter 2.4.2." - type: str - required: true - utm_protocol: - description: - - The protocol of the REST Endpoint. - choices: [ http, https ] - type: str - default: https - validate_certs: - description: - - Whether the REST interface's ssl certificate should be verified or not. - type: bool - default: true - state: - description: - - The desired state of the object. - - C(present) will create or update an object - - C(absent) will delete an object if it was present - type: str - choices: [ absent, present ] - default: present -''' + headers: + description: + - A dictionary of additional headers to be sent to POST and PUT requests. + - Is needed for some modules. 
+ type: dict + required: false + default: {} + utm_host: + description: + - The REST Endpoint of the Sophos UTM. + type: str + required: true + utm_port: + description: + - The port of the REST interface. + type: int + default: 4444 + utm_token: + description: + - The token used to identify at the REST-API. + - See U(https://www.sophos.com/en-us/medialibrary/PDFs/documentation/UTMonAWS/Sophos-UTM-RESTful-API.pdf?la=en), Chapter + 2.4.2. + type: str + required: true + utm_protocol: + description: + - The protocol of the REST Endpoint. + choices: [http, https] + type: str + default: https + validate_certs: + description: + - Whether the REST interface's SSL certificate should be verified or not. + type: bool + default: true + state: + description: + - The desired state of the object. + - V(present) creates or updates an object. + - V(absent) deletes an object if present. + type: str + choices: [absent, present] + default: present +""" diff --git a/plugins/doc_fragments/vexata.py b/plugins/doc_fragments/vexata.py index ff79613eec..3ca6684469 100644 --- a/plugins/doc_fragments/vexata.py +++ b/plugins/doc_fragments/vexata.py @@ -1,24 +1,13 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2019, Sandeep Kasargod # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): - DOCUMENTATION = r''' -options: - - See respective platform section for more details -requirements: - - See respective platform section for more details -notes: - - Ansible modules are available for Vexata VX100 arrays. -''' - # Documentation fragment for Vexata VX100 series VX100 = r''' options: @@ -30,17 +19,19 @@ options: user: description: - Vexata API user with administrative privileges. 
+ - Uses the E(VEXATA_USER) environment variable as a fallback. required: false type: str password: description: - Vexata API user password. + - Uses the E(VEXATA_PASSWORD) environment variable as a fallback. required: false type: str validate_certs: description: - - Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted. - - If set to C(true), please make sure Python >= 2.7.9 is installed on the given machine. + - Allows connection when SSL certificates are not valid. Set to V(false) when certificates are not trusted. + - If set to V(true), please make sure Python >= 2.7.9 is installed on the given machine. required: false type: bool default: false @@ -48,7 +39,6 @@ options: requirements: - Vexata VX100 storage array with VXOS >= v3.5.0 on storage array - vexatapi >= 0.0.1 - - python >= 2.7 - - VEXATA_USER and VEXATA_PASSWORD environment variables must be set if + - E(VEXATA_USER) and E(VEXATA_PASSWORD) environment variables must be set if user and password arguments are not passed to the module directly. ''' diff --git a/plugins/doc_fragments/xenserver.py b/plugins/doc_fragments/xenserver.py index eaee173849..7da1391420 100644 --- a/plugins/doc_fragments/xenserver.py +++ b/plugins/doc_fragments/xenserver.py @@ -1,41 +1,39 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Bojan Vitnik # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations class ModuleDocFragment(object): # Common parameters for XenServer modules - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: hostname: description: - - The hostname or IP address of the XenServer host or XenServer pool master. - - If the value is not specified in the task, the value of environment variable C(XENSERVER_HOST) will be used instead. 
+ - The hostname or IP address of the XenServer host or XenServer pool master. + - If the value is not specified in the task, the value of environment variable E(XENSERVER_HOST) is used instead. type: str default: localhost - aliases: [ host, pool ] + aliases: [host, pool] username: description: - - The username to use for connecting to XenServer. - - If the value is not specified in the task, the value of environment variable C(XENSERVER_USER) will be used instead. + - The username to use for connecting to XenServer. + - If the value is not specified in the task, the value of environment variable E(XENSERVER_USER) is used instead. type: str default: root - aliases: [ admin, user ] + aliases: [admin, user] password: description: - - The password to use for connecting to XenServer. - - If the value is not specified in the task, the value of environment variable C(XENSERVER_PASSWORD) will be used instead. + - The password to use for connecting to XenServer. + - If the value is not specified in the task, the value of environment variable E(XENSERVER_PASSWORD) is used instead. type: str - aliases: [ pass, pwd ] + aliases: [pass, pwd] validate_certs: description: - - Allows connection when SSL certificates are not valid. Set to C(false) when certificates are not trusted. - - If the value is not specified in the task, the value of environment variable C(XENSERVER_VALIDATE_CERTS) will be used instead. + - Allows connection when SSL certificates are not valid. Set to V(false) when certificates are not trusted. + - If the value is not specified in the task, the value of environment variable E(XENSERVER_VALIDATE_CERTS) is used instead. 
type: bool default: true -''' +""" diff --git a/plugins/filter/accumulate.py b/plugins/filter/accumulate.py new file mode 100644 index 0000000000..da784ab12b --- /dev/null +++ b/plugins/filter/accumulate.py @@ -0,0 +1,62 @@ +# Copyright (c) Max Gautier +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION = r""" +name: accumulate +short_description: Produce a list of accumulated sums of the input list contents +version_added: 10.1.0 +author: Max Gautier (@VannTen) +description: + - Passthrough to the L(Python itertools.accumulate function,https://docs.python.org/3/library/itertools.html#itertools.accumulate). + - Transforms an input list into the cumulative list of results from applying addition to the elements of the input list. + - Addition means the default Python implementation of C(+) for input list elements type. +options: + _input: + description: A list. + type: list + elements: any + required: true +""" + +RETURN = r""" +_value: + description: A list of cumulated sums of the elements of the input list. 
+ type: list + elements: any +""" + +EXAMPLES = r""" +- name: Enumerate parent directories of some path + ansible.builtin.debug: + var: > + "/some/path/to/my/file" + | split('/') | map('split', '/') + | community.general.accumulate | map('join', '/') + # Produces: ['', '/some', '/some/path', '/some/path/to', '/some/path/to/my', '/some/path/to/my/file'] + +- name: Growing string + ansible.builtin.debug: + var: "'abc' | community.general.accumulate" + # Produces ['a', 'ab', 'abc'] +""" + +from itertools import accumulate +from collections.abc import Sequence + +from ansible.errors import AnsibleFilterError + + +def list_accumulate(sequence): + if not isinstance(sequence, Sequence): + raise AnsibleFilterError(f'Invalid value type ({type(sequence)}) for accumulate ({sequence!r})') + + return accumulate(sequence) + + +class FilterModule(object): + + def filters(self): + return { + 'accumulate': list_accumulate, + } diff --git a/plugins/filter/counter.py b/plugins/filter/counter.py index 1b79294b59..f89bfd6d1a 100644 --- a/plugins/filter/counter.py +++ b/plugins/filter/counter.py @@ -1,56 +1,54 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2021, Remy Keil # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: counter - short_description: Counts hashable elements in a sequence - version_added: 4.3.0 - author: Rémy Keil (@keilr) - description: - - Counts hashable elements in a sequence. - options: - _input: - description: A sequence. - type: list - elements: any - required: true -''' +DOCUMENTATION = r""" +name: counter +short_description: Counts hashable elements in a sequence +version_added: 4.3.0 +author: Rémy Keil (@keilr) +description: + - Counts hashable elements in a sequence. 
+options: + _input: + description: A sequence. + type: list + elements: any + required: true +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Count occurrences ansible.builtin.debug: msg: >- {{ [1, 'a', 2, 2, 'a', 'b', 'a'] | community.general.counter }} # Produces: {1: 1, 'a': 3, 2: 2, 'b': 1} -''' +""" -RETURN = ''' - _value: - description: A dictionary with the elements of the sequence as keys, and their number of occurrences in the sequence as values. - type: dictionary -''' +RETURN = r""" +_value: + description: A dictionary with the elements of the sequence as keys, and their number of occurrences in the sequence as + values. + type: dictionary +""" from ansible.errors import AnsibleFilterError -from ansible.module_utils.common._collections_compat import Sequence +from collections.abc import Sequence from collections import Counter def counter(sequence): ''' Count elements in a sequence. Returns dict with count result. ''' if not isinstance(sequence, Sequence): - raise AnsibleFilterError('Argument for community.general.counter must be a sequence (string or list). %s is %s' % - (sequence, type(sequence))) + raise AnsibleFilterError(f'Argument for community.general.counter must be a sequence (string or list). 
{sequence} is {type(sequence)}') try: result = dict(Counter(sequence)) except TypeError as e: raise AnsibleFilterError( - "community.general.counter needs a sequence with hashable elements (int, float or str) - %s" % (e) + f"community.general.counter needs a sequence with hashable elements (int, float or str) - {e}" ) return result diff --git a/plugins/filter/crc32.py b/plugins/filter/crc32.py index 1f0aa2e9b0..11a6e77495 100644 --- a/plugins/filter/crc32.py +++ b/plugins/filter/crc32.py @@ -1,9 +1,7 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2022, Julien Riou # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from ansible.errors import AnsibleFilterError from ansible.module_utils.common.text.converters import to_bytes @@ -16,45 +14,44 @@ except ImportError: HAS_ZLIB = False -DOCUMENTATION = ''' - name: crc32 - short_description: Generate a CRC32 checksum - version_added: 5.4.0 - description: - - Checksum a string using CRC32 algorithm and return its hexadecimal representation. - options: - _input: - description: - - The string to checksum. - type: string - required: true - author: - - Julien Riou -''' - -EXAMPLES = ''' - - name: Checksum a test string - ansible.builtin.debug: - msg: "{{ 'test' | community.general.crc32 }}" -''' - -RETURN = ''' - _value: - description: CRC32 checksum. +DOCUMENTATION = r""" +name: crc32 +short_description: Generate a CRC32 checksum +version_added: 5.4.0 +description: + - Checksum a string using CRC32 algorithm and return its hexadecimal representation. +options: + _input: + description: + - The string to checksum. 
type: string -''' + required: true +author: + - Julien Riou +""" + +EXAMPLES = r""" +- name: Checksum a test string + ansible.builtin.debug: + msg: "{{ 'test' | community.general.crc32 }}" +""" + +RETURN = r""" +_value: + description: CRC32 checksum. + type: string +""" def crc32s(value): if not is_string(value): - raise AnsibleFilterError('Invalid value type (%s) for crc32 (%r)' % - (type(value), value)) + raise AnsibleFilterError(f'Invalid value type ({type(value)}) for crc32 ({value!r})') if not HAS_ZLIB: raise AnsibleFilterError('Failed to import zlib module') data = to_bytes(value, errors='surrogate_or_strict') - return "{0:x}".format(crc32(data) & 0xffffffff) + return f"{crc32(data) & 0xffffffff:x}" class FilterModule: diff --git a/plugins/filter/dict.py b/plugins/filter/dict.py index 720c9def96..d2d8bb952c 100644 --- a/plugins/filter/dict.py +++ b/plugins/filter/dict.py @@ -1,28 +1,26 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2021, Felix Fontein # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: dict - short_description: Convert a list of tuples into a dictionary - version_added: 3.0.0 - author: Felix Fontein (@felixfontein) - description: - - Convert a list of tuples into a dictionary. This is a filter version of the C(dict) function. - options: - _input: - description: A list of tuples (with exactly two elements). - type: list - elements: tuple - required: true -''' +DOCUMENTATION = r""" +name: dict +short_description: Convert a list of tuples into a dictionary +version_added: 3.0.0 +author: Felix Fontein (@felixfontein) +description: + - Convert a list of tuples into a dictionary. This is a filter version of the C(dict) function. 
+options: + _input: + description: A list of tuples (with exactly two elements). + type: list + elements: tuple + required: true +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Convert list of tuples into dictionary ansible.builtin.set_fact: dictionary: "{{ [[1, 2], ['a', 'b']] | community.general.dict }}" @@ -53,13 +51,13 @@ EXAMPLES = ''' # "k2": 42, # "k3": "b" # } -''' +""" -RETURN = ''' - _value: - description: The dictionary having the provided key-value pairs. - type: boolean -''' +RETURN = r""" +_value: + description: A dictionary with the provided key-value pairs. + type: dictionary +""" def dict_filter(sequence): diff --git a/plugins/filter/dict_kv.py b/plugins/filter/dict_kv.py index 59595f9573..79c8dd0fe6 100644 --- a/plugins/filter/dict_kv.py +++ b/plugins/filter/dict_kv.py @@ -1,42 +1,40 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2020 Stanislav German-Evtushenko (@giner) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: dict_kv - short_description: Convert a value to a dictionary with a single key-value pair - version_added: 1.3.0 - author: Stanislav German-Evtushenko (@giner) - description: - - Convert a value to a dictionary with a single key-value pair. - positional: key - options: - _input: - description: The value for the single key-value pair. - type: any - required: true - key: - description: The key for the single key-value pair. - type: any - required: true -''' +DOCUMENTATION = r""" +name: dict_kv +short_description: Convert a value to a dictionary with a single key-value pair +version_added: 1.3.0 +author: Stanislav German-Evtushenko (@giner) +description: + - Convert a value to a dictionary with a single key-value pair. 
+positional: key +options: + _input: + description: The value for the single key-value pair. + type: any + required: true + key: + description: The key for the single key-value pair. + type: any + required: true +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a one-element dictionary from a value ansible.builtin.debug: msg: "{{ 'myvalue' | dict_kv('mykey') }}" # Produces the dictionary {'mykey': 'myvalue'} -''' +""" -RETURN = ''' - _value: - description: A dictionary with a single key-value pair. - type: dictionary -''' +RETURN = r""" +_value: + description: A dictionary with a single key-value pair. + type: dictionary +""" def dict_kv(value, key): diff --git a/plugins/filter/from_csv.py b/plugins/filter/from_csv.py index 6472b67b1a..160eed959e 100644 --- a/plugins/filter/from_csv.py +++ b/plugins/filter/from_csv.py @@ -1,62 +1,60 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2021, Andrew Pantuso (@ajpantuso) # Copyright (c) 2018, Dag Wieers (@dagwieers) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: from_csv - short_description: Converts CSV text input into list of dicts - version_added: 2.3.0 - author: Andrew Pantuso (@Ajpantuso) - description: - - Converts CSV text input into list of dictionaries. - options: - _input: - description: A string containing a CSV document. - type: string - required: true - dialect: - description: - - The CSV dialect to use when parsing the CSV file. - - Possible values include C(excel), C(excel-tab) or C(unix). - type: str - default: excel - fieldnames: - description: - - A list of field names for every column. - - This is needed if the CSV does not have a header. - type: list - elements: str - delimiter: - description: - - A one-character string used to separate fields. 
- - When using this parameter, you change the default value used by I(dialect). - - The default value depends on the dialect used. - type: str - skipinitialspace: - description: - - Whether to ignore any whitespaces immediately following the delimiter. - - When using this parameter, you change the default value used by I(dialect). - - The default value depends on the dialect used. - type: bool - strict: - description: - - Whether to raise an exception on bad CSV input. - - When using this parameter, you change the default value used by I(dialect). - - The default value depends on the dialect used. - type: bool -''' +DOCUMENTATION = r""" +name: from_csv +short_description: Converts CSV text input into list of dicts +version_added: 2.3.0 +author: Andrew Pantuso (@Ajpantuso) +description: + - Converts CSV text input into list of dictionaries. +options: + _input: + description: A string containing a CSV document. + type: string + required: true + dialect: + description: + - The CSV dialect to use when parsing the CSV file. + - Possible values include V(excel), V(excel-tab) or V(unix). + type: str + default: excel + fieldnames: + description: + - A list of field names for every column. + - This is needed if the CSV does not have a header. + type: list + elements: str + delimiter: + description: + - A one-character string used to separate fields. + - When using this parameter, you change the default value used by O(dialect). + - The default value depends on the dialect used. + type: str + skipinitialspace: + description: + - Whether to ignore any whitespaces immediately following the delimiter. + - When using this parameter, you change the default value used by O(dialect). + - The default value depends on the dialect used. + type: bool + strict: + description: + - Whether to raise an exception on bad CSV input. + - When using this parameter, you change the default value used by O(dialect). + - The default value depends on the dialect used. 
+ type: bool +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Parse a CSV file's contents ansible.builtin.debug: msg: >- - {{ csv_data | community.genera.from_csv(dialect='unix') }} + {{ csv_data | community.general.from_csv(dialect='unix') }} vars: csv_data: | Column 1,Value @@ -71,17 +69,16 @@ EXAMPLES = ''' # "Column 1": "bar", # "Value": "42", # } -''' +""" -RETURN = ''' - _value: - description: A list with one dictionary per row. - type: list - elements: dictionary -''' +RETURN = r""" +_value: + description: A list with one dictionary per row. + type: list + elements: dictionary +""" from ansible.errors import AnsibleFilterError -from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.csv import (initialize_dialect, read_csv, CSVError, DialectNotAvailableError, @@ -99,7 +96,7 @@ def from_csv(data, dialect='excel', fieldnames=None, delimiter=None, skipinitial try: dialect = initialize_dialect(dialect, **dialect_params) except (CustomDialectFailureError, DialectNotAvailableError) as e: - raise AnsibleFilterError(to_native(e)) + raise AnsibleFilterError(str(e)) reader = read_csv(data, dialect, fieldnames) @@ -109,7 +106,7 @@ def from_csv(data, dialect='excel', fieldnames=None, delimiter=None, skipinitial for row in reader: data_list.append(row) except CSVError as e: - raise AnsibleFilterError("Unable to process file: %s" % to_native(e)) + raise AnsibleFilterError(f"Unable to process file: {e}") return data_list diff --git a/plugins/filter/from_ini.py b/plugins/filter/from_ini.py new file mode 100644 index 0000000000..07b16d4ac2 --- /dev/null +++ b/plugins/filter/from_ini.py @@ -0,0 +1,95 @@ + +# Copyright (c) 2023, Steffen Scheib +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: from_ini +short_description: Converts 
INI text input into a dictionary +version_added: 8.2.0 +author: Steffen Scheib (@sscheib) +description: + - Converts INI text input into a dictionary. +options: + _input: + description: A string containing an INI document. + type: string + required: true +""" + +EXAMPLES = r""" +- name: Slurp an INI file + ansible.builtin.slurp: + src: /etc/rhsm/rhsm.conf + register: rhsm_conf + +- name: Display the INI file as dictionary + ansible.builtin.debug: + var: rhsm_conf.content | b64decode | community.general.from_ini + +- name: Set a new dictionary fact with the contents of the INI file + ansible.builtin.set_fact: + rhsm_dict: >- + {{ + rhsm_conf.content | b64decode | community.general.from_ini + }} +""" + +RETURN = r""" +_value: + description: A dictionary representing the INI file. + type: dictionary +""" + + +from io import StringIO +from configparser import ConfigParser + +from ansible.errors import AnsibleFilterError + + +class IniParser(ConfigParser): + ''' Implements a configparser which is able to return a dict ''' + + def __init__(self): + super().__init__(interpolation=None) + self.optionxform = str + + def as_dict(self): + d = dict(self._sections) + for k in d: + d[k] = dict(self._defaults, **d[k]) + d[k].pop('__name__', None) + + if self._defaults: + d['DEFAULT'] = dict(self._defaults) + + return d + + +def from_ini(obj): + ''' Read the given string as INI file and return a dict ''' + + if not isinstance(obj, str): + raise AnsibleFilterError(f'from_ini requires a str, got {type(obj)}') + + parser = IniParser() + + try: + parser.read_file(StringIO(obj)) + except Exception as ex: + raise AnsibleFilterError(f'from_ini failed to parse given string: {ex}', orig_exc=ex) + + return parser.as_dict() + + +class FilterModule(object): + ''' Query filter ''' + + def filters(self): + + return { + 'from_ini': from_ini + } diff --git a/plugins/filter/groupby_as_dict.py b/plugins/filter/groupby_as_dict.py index 4a8f4c6dc1..766d365575 100644 --- 
a/plugins/filter/groupby_as_dict.py +++ b/plugins/filter/groupby_as_dict.py @@ -1,32 +1,32 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2021, Felix Fontein # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: groupby_as_dict - short_description: Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute - version_added: 3.1.0 - author: Felix Fontein (@felixfontein) - description: - - Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute. - positional: attribute - options: - _input: - description: A list of dictionaries - type: list - elements: dictionary - required: true - attribute: - description: The attribute to use as the key. - type: str - required: true -''' +DOCUMENTATION = r""" +name: groupby_as_dict +short_description: Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute +version_added: 3.1.0 +author: Felix Fontein (@felixfontein) +description: + - Transform a sequence of dictionaries to a dictionary where the dictionaries are indexed by an attribute. + - This filter is similar to the Jinja2 C(groupby) filter. Use the Jinja2 C(groupby) filter if you have multiple entries + with the same value, or when you need a dictionary with list values, or when you need to use deeply nested attributes. +positional: attribute +options: + _input: + description: A list of dictionaries. + type: list + elements: dictionary + required: true + attribute: + description: The attribute to use as the key. 
+ type: str + required: true +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Arrange a list of dictionaries as a dictionary of dictionaries ansible.builtin.debug: msg: "{{ sequence | community.general.groupby_as_dict('key') }}" @@ -44,16 +44,16 @@ EXAMPLES = ''' # other_value: # key: other_value # baz: bar -''' +""" -RETURN = ''' - _value: - description: A dictionary containing the dictionaries from the list as values. - type: dictionary -''' +RETURN = r""" +_value: + description: A dictionary containing the dictionaries from the list as values. + type: dictionary +""" from ansible.errors import AnsibleFilterError -from ansible.module_utils.common._collections_compat import Mapping, Sequence +from collections.abc import Mapping, Sequence def groupby_as_dict(sequence, attribute): @@ -70,12 +70,12 @@ def groupby_as_dict(sequence, attribute): result = dict() for list_index, element in enumerate(sequence): if not isinstance(element, Mapping): - raise AnsibleFilterError('Sequence element #{0} is not a mapping'.format(list_index)) + raise AnsibleFilterError(f'Sequence element #{list_index} is not a mapping') if attribute not in element: - raise AnsibleFilterError('Attribute not contained in element #{0} of sequence'.format(list_index)) + raise AnsibleFilterError(f'Attribute not contained in element #{list_index} of sequence') result_index = element[attribute] if result_index in result: - raise AnsibleFilterError('Multiple sequence entries have attribute value {0!r}'.format(result_index)) + raise AnsibleFilterError(f'Multiple sequence entries have attribute value {result_index!r}') result[result_index] = element return result diff --git a/plugins/filter/hashids.py b/plugins/filter/hashids.py index 45fba83c03..c58ae4d70b 100644 --- a/plugins/filter/hashids.py +++ b/plugins/filter/hashids.py @@ -1,21 +1,23 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2021, Andrew Pantuso (@ajpantuso) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or 
https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations from ansible.errors import ( AnsibleError, AnsibleFilterError, - AnsibleFilterTypeError, ) from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.common.collections import is_sequence +try: + from ansible.errors import AnsibleTypeError +except ImportError: + from ansible.errors import AnsibleFilterTypeError as AnsibleTypeError + try: from hashids import Hashids HAS_HASHIDS = True @@ -27,7 +29,7 @@ def initialize_hashids(**kwargs): if not HAS_HASHIDS: raise AnsibleError("The hashids library must be installed in order to use this plugin") - params = dict((k, v) for k, v in kwargs.items() if v) + params = {k: v for k, v in kwargs.items() if v} try: return Hashids(**params) @@ -64,9 +66,7 @@ def hashids_encode(nums, salt=None, alphabet=None, min_length=None): try: hashid = hashids.encode(*nums) except TypeError as e: - raise AnsibleFilterTypeError( - "Data to encode must by a tuple or list of ints: %s" % to_native(e) - ) + raise AnsibleTypeError(f"Data to encode must by a tuple or list of ints: {e}") return hashid diff --git a/plugins/filter/jc.py b/plugins/filter/jc.py index 6708f573d3..92996e812c 100644 --- a/plugins/filter/jc.py +++ b/plugins/filter/jc.py @@ -1,48 +1,46 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2015, Filipe Niero Felisbino # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later # # contributed by Kelly Brazil -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: jc - short_description: Convert output of many shell commands and file-types to JSON - version_added: 1.1.0 - author: Kelly Brazil 
(@kellyjonbrazil) - description: - - Convert output of many shell commands and file-types to JSON. - - Uses the L(jc library,https://github.com/kellyjonbrazil/jc). - positional: parser - options: - _input: - description: The data to convert. - type: string - required: true - parser: - description: - - The correct parser for the input data. - - For example C(ifconfig). - - "Note: use underscores instead of dashes (if any) in the parser module name." - - See U(https://github.com/kellyjonbrazil/jc#parsers) for the latest list of parsers. - type: string - required: true - quiet: - description: Set to C(false) to not suppress warnings. - type: boolean - default: true - raw: - description: Set to C(true) to return pre-processed JSON. - type: boolean - default: false - requirements: - - jc installed as a Python library (U(https://pypi.org/project/jc/)) -''' +DOCUMENTATION = r""" +name: jc +short_description: Convert output of many shell commands and file-types to JSON +version_added: 1.1.0 +author: Kelly Brazil (@kellyjonbrazil) +description: + - Convert output of many shell commands and file-types to JSON. + - Uses the L(jc library,https://github.com/kellyjonbrazil/jc). +positional: parser +options: + _input: + description: The data to convert. + type: string + required: true + parser: + description: + - The correct parser for the input data. + - For example V(ifconfig). + - 'Note: use underscores instead of dashes (if any) in the parser module name.' + - See U(https://github.com/kellyjonbrazil/jc#parsers) for the latest list of parsers. + type: string + required: true + quiet: + description: Set to V(false) to not suppress warnings. + type: boolean + default: true + raw: + description: Set to V(true) to return pre-processed JSON. 
+ type: boolean + default: false +requirements: + - jc installed as a Python library (U(https://pypi.org/project/jc/)) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install the prereqs of the jc filter (jc Python package) on the Ansible controller delegate_to: localhost ansible.builtin.pip: @@ -68,13 +66,13 @@ EXAMPLES = ''' # "operating_system": "GNU/Linux", # "processor": "x86_64" # } -''' +""" -RETURN = ''' - _value: - description: The processed output. - type: any -''' +RETURN = r""" +_value: + description: The processed output. + type: any +""" from ansible.errors import AnsibleError, AnsibleFilterError import importlib @@ -86,7 +84,7 @@ except ImportError: HAS_LIB = False -def jc(data, parser, quiet=True, raw=False): +def jc_filter(data, parser, quiet=True, raw=False): """Convert returned command output to JSON using the JC library Arguments: @@ -138,11 +136,17 @@ def jc(data, parser, quiet=True, raw=False): raise AnsibleError('You need to install "jc" as a Python library on the Ansible controller prior to running jc filter') try: - jc_parser = importlib.import_module('jc.parsers.' 
+ parser) - return jc_parser.parse(data, quiet=quiet, raw=raw) + # new API (jc v1.18.0 and higher) allows use of plugin parsers + if hasattr(jc, 'parse'): + return jc.parse(parser, data, quiet=quiet, raw=raw) + + # old API (jc v1.17.7 and lower) + else: + jc_parser = importlib.import_module(f'jc.parsers.{parser}') + return jc_parser.parse(data, quiet=quiet, raw=raw) except Exception as e: - raise AnsibleFilterError('Error in jc filter plugin: %s' % e) + raise AnsibleFilterError(f'Error in jc filter plugin: {e}') class FilterModule(object): @@ -150,5 +154,5 @@ class FilterModule(object): def filters(self): return { - 'jc': jc + 'jc': jc_filter, } diff --git a/plugins/filter/json_diff.yml b/plugins/filter/json_diff.yml new file mode 100644 index 0000000000..a370564d7a --- /dev/null +++ b/plugins/filter/json_diff.yml @@ -0,0 +1,56 @@ +--- +# Copyright (c) Stanislav Meduna (@numo68) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: json_diff + short_description: Create a JSON patch by comparing two JSON files + description: + - This filter compares the input with the argument and computes a list of operations + that can be consumed by the P(community.general.json_patch_recipe#filter) to change the input + to the argument. + requirements: + - jsonpatch + version_added: 10.3.0 + author: + - Stanislav Meduna (@numo68) + positional: target + options: + _input: + description: A list or a dictionary representing a source JSON object, or a string containing a JSON object. + type: raw + required: true + target: + description: A list or a dictionary representing a target JSON object, or a string containing a JSON object. 
+ type: raw + required: true + seealso: + - name: RFC 6902 + description: JavaScript Object Notation (JSON) Patch + link: https://datatracker.ietf.org/doc/html/rfc6902 + - name: RFC 6901 + description: JavaScript Object Notation (JSON) Pointer + link: https://datatracker.ietf.org/doc/html/rfc6901 + - name: jsonpatch Python Package + description: A Python library for applying JSON patches + link: https://pypi.org/project/jsonpatch/ + +RETURN: + _value: + description: A list of JSON patch operations to apply. + type: list + elements: dict + +EXAMPLES: | + - name: Compute a difference + ansible.builtin.debug: + msg: "{{ input | community.general.json_diff(target) }}" + vars: + input: {"foo": 1, "bar":{"baz": 2}, "baw": [1, 2, 3], "hello": "day"} + target: {"foo": 1, "bar": {"baz": 2}, "baw": [1, 3], "baq": {"baz": 2}, "hello": "night"} + # => [ + # {"op": "add", "path": "/baq", "value": {"baz": 2}}, + # {"op": "remove", "path": "/baw/1"}, + # {"op": "replace", "path": "/hello", "value": "night"} + # ] diff --git a/plugins/filter/json_patch.py b/plugins/filter/json_patch.py new file mode 100644 index 0000000000..8cd6bd08b0 --- /dev/null +++ b/plugins/filter/json_patch.py @@ -0,0 +1,193 @@ +# Copyright (c) Stanislav Meduna (@numo68) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations +from json import loads +from typing import TYPE_CHECKING +from ansible.errors import AnsibleFilterError + + +if TYPE_CHECKING: + from typing import Any, Callable, Union + +try: + import jsonpatch + +except ImportError as exc: + HAS_LIB = False + JSONPATCH_IMPORT_ERROR = exc +else: + HAS_LIB = True + JSONPATCH_IMPORT_ERROR = None + +OPERATIONS_AVAILABLE = ["add", "copy", "move", "remove", "replace", "test"] +OPERATIONS_NEEDING_FROM = ["copy", "move"] +OPERATIONS_NEEDING_VALUE = ["add", "replace", "test"] + + +class FilterModule: + """Filter 
plugin.""" + + def check_json_object(self, filter_name: str, object_name: str, inp: Any): + if isinstance(inp, (str, bytes, bytearray)): + try: + return loads(inp) + except Exception as e: + raise AnsibleFilterError( + f"{filter_name}: could not decode JSON from {object_name}: {e}" + ) from e + + if not isinstance(inp, (list, dict)): + raise AnsibleFilterError( + f"{filter_name}: {object_name} is not dictionary, list or string" + ) + + return inp + + def check_patch_arguments(self, filter_name: str, args: dict): + + if "op" not in args or not isinstance(args["op"], str): + raise AnsibleFilterError(f"{filter_name}: 'op' argument is not a string") + + if args["op"] not in OPERATIONS_AVAILABLE: + raise AnsibleFilterError( + f"{filter_name}: unsupported 'op' argument: {args['op']}" + ) + + if "path" not in args or not isinstance(args["path"], str): + raise AnsibleFilterError(f"{filter_name}: 'path' argument is not a string") + + if args["op"] in OPERATIONS_NEEDING_FROM: + if "from" not in args: + raise AnsibleFilterError( + f"{filter_name}: 'from' argument missing for '{args['op']}' operation" + ) + if not isinstance(args["from"], str): + raise AnsibleFilterError( + f"{filter_name}: 'from' argument is not a string" + ) + + def json_patch( + self, + inp: Union[str, list, dict, bytes, bytearray], + op: str, + path: str, + value: Any = None, + **kwargs: dict, + ) -> Any: + + if not HAS_LIB: + raise AnsibleFilterError( + "You need to install 'jsonpatch' package prior to running 'json_patch' filter" + ) from JSONPATCH_IMPORT_ERROR + + args = {"op": op, "path": path} + from_arg = kwargs.pop("from", None) + fail_test = kwargs.pop("fail_test", False) + + if kwargs: + raise AnsibleFilterError( + f"json_patch: unexpected keywords arguments: {', '.join(sorted(kwargs))}" + ) + + if not isinstance(fail_test, bool): + raise AnsibleFilterError("json_patch: 'fail_test' argument is not a bool") + + if op in OPERATIONS_NEEDING_VALUE: + args["value"] = value + if op in 
OPERATIONS_NEEDING_FROM and from_arg is not None: + args["from"] = from_arg + + inp = self.check_json_object("json_patch", "input", inp) + self.check_patch_arguments("json_patch", args) + + result = None + + try: + result = jsonpatch.apply_patch(inp, [args]) + except jsonpatch.JsonPatchTestFailed as e: + if fail_test: + raise AnsibleFilterError( + f"json_patch: test operation failed: {e}" + ) from e + else: + pass + except Exception as e: + raise AnsibleFilterError(f"json_patch: patch failed: {e}") from e + + return result + + def json_patch_recipe( + self, + inp: Union[str, list, dict, bytes, bytearray], + operations: list, + /, + fail_test: bool = False, + ) -> Any: + + if not HAS_LIB: + raise AnsibleFilterError( + "You need to install 'jsonpatch' package prior to running 'json_patch_recipe' filter" + ) from JSONPATCH_IMPORT_ERROR + + if not isinstance(operations, list): + raise AnsibleFilterError( + "json_patch_recipe: 'operations' needs to be a list" + ) + + if not isinstance(fail_test, bool): + raise AnsibleFilterError("json_patch: 'fail_test' argument is not a bool") + + result = None + + inp = self.check_json_object("json_patch_recipe", "input", inp) + for args in operations: + self.check_patch_arguments("json_patch_recipe", args) + + try: + result = jsonpatch.apply_patch(inp, operations) + except jsonpatch.JsonPatchTestFailed as e: + if fail_test: + raise AnsibleFilterError( + f"json_patch_recipe: test operation failed: {e}" + ) from e + else: + pass + except Exception as e: + raise AnsibleFilterError(f"json_patch_recipe: patch failed: {e}") from e + + return result + + def json_diff( + self, + inp: Union[str, list, dict, bytes, bytearray], + target: Union[str, list, dict, bytes, bytearray], + ) -> list: + + if not HAS_LIB: + raise AnsibleFilterError( + "You need to install 'jsonpatch' package prior to running 'json_diff' filter" + ) from JSONPATCH_IMPORT_ERROR + + inp = self.check_json_object("json_diff", "input", inp) + target = 
self.check_json_object("json_diff", "target", target) + + try: + result = list(jsonpatch.make_patch(inp, target)) + except Exception as e: + raise AnsibleFilterError(f"JSON diff failed: {e}") from e + + return result + + def filters(self) -> dict[str, Callable[..., Any]]: + """Map filter plugin names to their functions. + + Returns: + dict: The filter plugin functions. + """ + return { + "json_patch": self.json_patch, + "json_patch_recipe": self.json_patch_recipe, + "json_diff": self.json_diff, + } diff --git a/plugins/filter/json_patch.yml b/plugins/filter/json_patch.yml new file mode 100644 index 0000000000..42a0309202 --- /dev/null +++ b/plugins/filter/json_patch.yml @@ -0,0 +1,145 @@ +--- +# Copyright (c) Stanislav Meduna (@numo68) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: json_patch + short_description: Apply a JSON-Patch (RFC 6902) operation to an object + description: + - This filter applies a single JSON patch operation and returns a modified object. + - If the operation is a test, the filter returns an ummodified object if the test + succeeded and a V(none) value otherwise. + requirements: + - jsonpatch + version_added: 10.3.0 + author: + - Stanislav Meduna (@numo68) + positional: op, path, value + options: + _input: + description: A list or a dictionary representing a JSON object, or a string containing a JSON object. + type: raw + required: true + op: + description: Operation to perform (see L(RFC 6902, https://datatracker.ietf.org/doc/html/rfc6902)). + type: str + choices: [add, copy, move, remove, replace, test] + required: true + path: + description: JSON Pointer path to the target location (see L(RFC 6901, https://datatracker.ietf.org/doc/html/rfc6901)). + type: str + required: true + value: + description: Value to use in the operation. Ignored for O(op=copy), O(op=move), and O(op=remove). 
+ type: raw + from: + description: The source location for the copy and move operation. Mandatory + for O(op=copy) and O(op=move), ignored otherwise. + type: str + fail_test: + description: If V(false), a failed O(op=test) will return V(none). If V(true), the filter + invocation will fail with an error. + type: bool + default: false + seealso: + - name: RFC 6902 + description: JavaScript Object Notation (JSON) Patch + link: https://datatracker.ietf.org/doc/html/rfc6902 + - name: RFC 6901 + description: JavaScript Object Notation (JSON) Pointer + link: https://datatracker.ietf.org/doc/html/rfc6901 + - name: jsonpatch Python Package + description: A Python library for applying JSON patches + link: https://pypi.org/project/jsonpatch/ + +RETURN: + _value: + description: A modified object or V(none) if O(op=test), O(fail_test=false) and the test failed. + type: any + returned: always + +EXAMPLES: | + - name: Insert a new element into an array at a specified index + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('add', '/1', {'baz': 'qux'}) }}" + vars: + input: ["foo": { "one": 1 }, "bar": { "two": 2 }] + # => [{"foo": {"one": 1}}, {"baz": "qux"}, {"bar": {"two": 2}}] + + - name: Insert a new key into a dictionary + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('add', '/bar/baz', 'qux') }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => {"foo": {"one": 1}, "bar": {"baz": "qux", "two": 2}} + + - name: Input is a string + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('add', '/baz', 3) }}" + vars: + input: '{ "foo": { "one": 1 }, "bar": { "two": 2 } }' + # => {"foo": {"one": 1}, "bar": { "two": 2 }, "baz": 3} + + - name: Existing key is replaced + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('add', '/bar', 'qux') }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => {"foo": {"one": 1}, "bar": "qux"} + + - name: Escaping tilde as ~0 and 
slash as ~1 in the path + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('add', '/~0~1', 'qux') }}" + vars: + input: {} + # => {"~/": "qux"} + + - name: Add at the end of the array + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('add', '/-', 4) }}" + vars: + input: [1, 2, 3] + # => [1, 2, 3, 4] + + - name: Remove a key + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('remove', '/bar') }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => {"foo": {"one": 1} } + + - name: Replace a value + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('replace', '/bar', 2) }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => {"foo": {"one": 1}, "bar": 2} + + - name: Copy a value + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('copy', '/baz', from='/bar') }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => {"foo": {"one": 1}, "bar": { "two": 2 }, "baz": { "two": 2 }} + + - name: Move a value + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('move', '/baz', from='/bar') }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => {"foo": {"one": 1}, "baz": { "two": 2 }} + + - name: Successful test + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('test', '/bar/two', 2) | ternary('OK', 'Failed') }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => OK + + - name: Unuccessful test + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch('test', '/bar/two', 9) | ternary('OK', 'Failed') }}" + vars: + input: { "foo": { "one": 1 }, "bar": { "two": 2 } } + # => Failed diff --git a/plugins/filter/json_patch_recipe.yml b/plugins/filter/json_patch_recipe.yml new file mode 100644 index 0000000000..671600b941 --- /dev/null +++ b/plugins/filter/json_patch_recipe.yml @@ -0,0 +1,102 @@ +--- +# Copyright (c) Stanislav Meduna 
(@numo68) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: json_patch_recipe + short_description: Apply JSON-Patch (RFC 6902) operations to an object + description: + - This filter sequentially applies JSON patch operations and returns a modified object. + - If there is a test operation in the list, the filter continues if the test + succeeded and returns a V(none) value otherwise. + requirements: + - jsonpatch + version_added: 10.3.0 + author: + - Stanislav Meduna (@numo68) + positional: operations, fail_test + options: + _input: + description: A list or a dictionary representing a JSON object, or a string containing a JSON object. + type: raw + required: true + operations: + description: A list of JSON patch operations to apply. + type: list + elements: dict + required: true + suboptions: + op: + description: Operation to perform (see L(RFC 6902, https://datatracker.ietf.org/doc/html/rfc6902)). + type: str + choices: [add, copy, move, remove, replace, test] + required: true + path: + description: JSON Pointer path to the target location (see L(RFC 6901, https://datatracker.ietf.org/doc/html/rfc6901)). + type: str + required: true + value: + description: Value to use in the operation. Ignored for O(operations[].op=copy), O(operations[].op=move), and O(operations[].op=remove). + type: raw + from: + description: The source location for the copy and move operation. Mandatory + for O(operations[].op=copy) and O(operations[].op=move), ignored otherwise. + type: str + fail_test: + description: If V(false), a failed O(operations[].op=test) will return V(none). If V(true), the filter + invocation will fail with an error. 
+ type: bool + default: false + seealso: + - name: RFC 6902 + description: JavaScript Object Notation (JSON) Patch + link: https://datatracker.ietf.org/doc/html/rfc6902 + - name: RFC 6901 + description: JavaScript Object Notation (JSON) Pointer + link: https://datatracker.ietf.org/doc/html/rfc6901 + - name: jsonpatch Python Package + description: A Python library for applying JSON patches + link: https://pypi.org/project/jsonpatch/ + +RETURN: + _value: + description: A modified object or V(none) if O(operations[].op=test), O(fail_test=false) + and the test failed. + type: any + returned: always + +EXAMPLES: | + - name: Apply a series of operations + ansible.builtin.debug: + msg: "{{ input | community.general.json_patch_recipe(operations) }}" + vars: + input: {} + operations: + - op: 'add' + path: '/foo' + value: 1 + - op: 'add' + path: '/bar' + value: [] + - op: 'add' + path: '/bar/-' + value: 2 + - op: 'add' + path: '/bar/0' + value: 1 + - op: 'remove' + path: '/bar/0' + - op: 'move' + from: '/foo' + path: '/baz' + - op: 'copy' + from: '/baz' + path: '/bax' + - op: 'copy' + from: '/baz' + path: '/bay' + - op: 'replace' + path: '/baz' + value: [10, 20, 30] + # => {"bar":[2],"bax":1,"bay":1,"baz":[10,20,30]} diff --git a/plugins/filter/json_query.py b/plugins/filter/json_query.py index 9e8fa4ef2e..e040a4aca2 100644 --- a/plugins/filter/json_query.py +++ b/plugins/filter/json_query.py @@ -1,34 +1,32 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2015, Filipe Niero Felisbino # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: json_query - short_description: Select a single element or a data subset from a complex data structure - description: - - This filter lets you query a complex JSON structure and iterate over 
it using a loop structure. - positional: expr - options: - _input: - description: - - The JSON data to query. - type: any - required: true - expr: - description: - - The query expression. - - See U(http://jmespath.org/examples.html) for examples. - type: string - required: true - requirements: - - jmespath -''' +DOCUMENTATION = r""" +name: json_query +short_description: Select a single element or a data subset from a complex data structure +description: + - This filter lets you query a complex JSON structure and iterate over it using a loop structure. +positional: expr +options: + _input: + description: + - The JSON data to query. + type: any + required: true + expr: + description: + - The query expression. + - See U(http://jmespath.org/examples.html) for examples. + type: string + required: true +requirements: + - jmespath +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Define data to work on in the examples below ansible.builtin.set_fact: domain_definition: @@ -99,13 +97,13 @@ EXAMPLES = ''' msg: "{{ domain_definition | to_json | from_json | community.general.json_query(server_name_query) }}" vars: server_name_query: "domain.server[?contains(name,'server1')].port" -''' +""" -RETURN = ''' - _value: - description: The result of the query. - type: any -''' +RETURN = r""" +_value: + description: The result of the query. 
+ type: any +""" from ansible.errors import AnsibleError, AnsibleFilterError @@ -125,17 +123,24 @@ def json_query(data, expr): 'json_query filter') # Hack to handle Ansible Unsafe text, AnsibleMapping and AnsibleSequence - # See issue: https://github.com/ansible-collections/community.general/issues/320 - jmespath.functions.REVERSE_TYPES_MAP['string'] = jmespath.functions.REVERSE_TYPES_MAP['string'] + ('AnsibleUnicode', 'AnsibleUnsafeText', ) - jmespath.functions.REVERSE_TYPES_MAP['array'] = jmespath.functions.REVERSE_TYPES_MAP['array'] + ('AnsibleSequence', ) - jmespath.functions.REVERSE_TYPES_MAP['object'] = jmespath.functions.REVERSE_TYPES_MAP['object'] + ('AnsibleMapping', ) + # See issues https://github.com/ansible-collections/community.general/issues/320 + # and https://github.com/ansible/ansible/issues/85600. + jmespath.functions.REVERSE_TYPES_MAP['string'] = jmespath.functions.REVERSE_TYPES_MAP['string'] + ( + 'AnsibleUnicode', 'AnsibleUnsafeText', '_AnsibleTaggedStr', + ) + jmespath.functions.REVERSE_TYPES_MAP['array'] = jmespath.functions.REVERSE_TYPES_MAP['array'] + ( + 'AnsibleSequence', '_AnsibleLazyTemplateList', + ) + jmespath.functions.REVERSE_TYPES_MAP['object'] = jmespath.functions.REVERSE_TYPES_MAP['object'] + ( + 'AnsibleMapping', '_AnsibleLazyTemplateDict', + ) try: return jmespath.search(expr, data) except jmespath.exceptions.JMESPathError as e: - raise AnsibleFilterError('JMESPathError in json_query filter plugin:\n%s' % e) + raise AnsibleFilterError(f'JMESPathError in json_query filter plugin:\n{e}') except Exception as e: # For older jmespath, we can get ValueError and TypeError without much info. 
-        raise AnsibleFilterError('Error in jmespath.search in json_query filter plugin:\n%s' % e)
+        raise AnsibleFilterError(f'Error in jmespath.search in json_query filter plugin:\n{e}')
 
 
 class FilterModule(object):
diff --git a/plugins/filter/keep_keys.py b/plugins/filter/keep_keys.py
new file mode 100644
index 0000000000..18876789d6
--- /dev/null
+++ b/plugins/filter/keep_keys.py
@@ -0,0 +1,136 @@
+# Copyright (c) 2024 Vladimir Botka
+# Copyright (c) 2024 Felix Fontein
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+name: keep_keys
+short_description: Keep specific keys from dictionaries in a list
+version_added: "9.1.0"
+author:
+  - Vladimir Botka (@vbotka)
+  - Felix Fontein (@felixfontein)
+description: This filter keeps only specified keys from a provided list of dictionaries.
+options:
+  _input:
+    description:
+      - A list of dictionaries.
+      - Top level keys must be strings.
+    type: list
+    elements: dictionary
+    required: true
+  target:
+    description:
+      - A single key or key pattern to keep, or a list of keys or key patterns to keep.
+      - If O(matching_parameter=regex) there must be exactly one pattern provided.
+    type: raw
+    required: true
+  matching_parameter:
+    description: Specify the matching option of target keys.
+    type: str
+    default: equal
+    choices:
+      equal: Matches keys of exactly one of the O(target) items.
+      starts_with: Matches keys that start with one of the O(target) items.
+      ends_with: Matches keys that end with one of the O(target) items.
+      regex:
+        - Matches keys that match the regular expression provided in O(target).
+        - In this case, O(target) must be a regex string or a list with single regex string.
+""" + +EXAMPLES = r""" +- l: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + + # 1) By default match keys that equal any of the items in the target. +- t: [k0_x0, k1_x1] + r: "{{ l | community.general.keep_keys(target=t) }}" + + # 2) Match keys that start with any of the items in the target. +- t: [k0, k1] + r: "{{ l | community.general.keep_keys(target=t, matching_parameter='starts_with') }}" + + # 3) Match keys that end with any of the items in target. +- t: [x0, x1] + r: "{{ l | community.general.keep_keys(target=t, matching_parameter='ends_with') }}" + + # 4) Match keys by the regex. +- t: ['^.*[01]_x.*$'] + r: "{{ l | community.general.keep_keys(target=t, matching_parameter='regex') }}" + + # 5) Match keys by the regex. +- t: '^.*[01]_x.*$' + r: "{{ l | community.general.keep_keys(target=t, matching_parameter='regex') }}" + + # The results of above examples 1-5 are all the same. +- r: + - {k0_x0: A0, k1_x1: B0} + - {k0_x0: A1, k1_x1: B1} + + # 6) By default match keys that equal the target. +- t: k0_x0 + r: "{{ l | community.general.keep_keys(target=t) }}" + + # 7) Match keys that start with the target. +- t: k0 + r: "{{ l | community.general.keep_keys(target=t, matching_parameter='starts_with') }}" + + # 8) Match keys that end with the target. +- t: x0 + r: "{{ l | community.general.keep_keys(target=t, matching_parameter='ends_with') }}" + + # 9) Match keys by the regex. +- t: '^.*0_x.*$' + r: "{{ l | community.general.keep_keys(target=t, matching_parameter='regex') }}" + + # The results of above examples 6-9 are all the same. +- r: + - {k0_x0: A0} + - {k0_x0: A1} +""" + +RETURN = r""" +_value: + description: The list of dictionaries with selected keys. 
+ type: list + elements: dictionary +""" + +from ansible_collections.community.general.plugins.plugin_utils.keys_filter import ( + _keys_filter_params, + _keys_filter_target_str) + + +def keep_keys(data, target=None, matching_parameter='equal'): + """keep specific keys from dictionaries in a list""" + + # test parameters + _keys_filter_params(data, matching_parameter) + # test and transform target + tt = _keys_filter_target_str(target, matching_parameter) + + if matching_parameter == 'equal': + def keep_key(key): + return key in tt + elif matching_parameter == 'starts_with': + def keep_key(key): + return key.startswith(tt) + elif matching_parameter == 'ends_with': + def keep_key(key): + return key.endswith(tt) + elif matching_parameter == 'regex': + def keep_key(key): + return tt.match(key) is not None + + return [{k: v for k, v in d.items() if keep_key(k)} for d in data] + + +class FilterModule(object): + + def filters(self): + return { + 'keep_keys': keep_keys, + } diff --git a/plugins/filter/lists.py b/plugins/filter/lists.py new file mode 100644 index 0000000000..0bae08f24c --- /dev/null +++ b/plugins/filter/lists.py @@ -0,0 +1,200 @@ +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +from ansible.errors import AnsibleFilterError +from ansible.module_utils.common.collections import is_sequence + + +def remove_duplicates(lst): + seen = set() + seen_add = seen.add + result = [] + for item in lst: + try: + if item not in seen: + seen_add(item) + result.append(item) + except TypeError: + # This happens for unhashable values `item`. If this happens, + # convert `seen` to a list and continue. 
+ seen = list(seen) + seen_add = seen.append + if item not in seen: + seen_add(item) + result.append(item) + return result + + +def flatten_list(lst): + result = [] + for sublist in lst: + if not is_sequence(sublist): + msg = ("All arguments must be lists. %s is %s") + raise AnsibleFilterError(msg % (sublist, type(sublist))) + if len(sublist) > 0: + if all(is_sequence(sub) for sub in sublist): + for item in sublist: + result.append(item) + else: + result.append(sublist) + return result + + +def lists_union(*args, **kwargs): + lists = args + flatten = kwargs.pop('flatten', False) + + if kwargs: + # Some unused kwargs remain + raise AnsibleFilterError( + f"lists_union() got unexpected keywords arguments: {', '.join(kwargs.keys())}" + ) + + if flatten: + lists = flatten_list(args) + + if not lists: + return [] + + if len(lists) == 1: + return lists[0] + + a = lists[0] + for b in lists[1:]: + a = do_union(a, b) + return remove_duplicates(a) + + +def do_union(a, b): + return a + b + + +def lists_intersect(*args, **kwargs): + lists = args + flatten = kwargs.pop('flatten', False) + + if kwargs: + # Some unused kwargs remain + raise AnsibleFilterError( + f"lists_intersect() got unexpected keywords arguments: {', '.join(kwargs.keys())}" + ) + + if flatten: + lists = flatten_list(args) + + if not lists: + return [] + + if len(lists) == 1: + return lists[0] + + a = remove_duplicates(lists[0]) + for b in lists[1:]: + a = do_intersect(a, b) + return a + + +def do_intersect(a, b): + isect = [] + try: + other = set(b) + isect = [item for item in a if item in other] + except TypeError: + # This happens for unhashable values, + # use a list instead and redo. 
+        other = list(b)
+        isect = [item for item in a if item in other]
+    return isect
+
+
+def lists_difference(*args, **kwargs):
+    lists = args
+    flatten = kwargs.pop('flatten', False)
+
+    if kwargs:
+        # Some unused kwargs remain
+        raise AnsibleFilterError(
+            f"lists_difference() got unexpected keywords arguments: {', '.join(kwargs.keys())}"
+        )
+
+    if flatten:
+        lists = flatten_list(args)
+
+    if not lists:
+        return []
+
+    if len(lists) == 1:
+        return lists[0]
+
+    a = remove_duplicates(lists[0])
+    for b in lists[1:]:
+        a = do_difference(a, b)
+    return a
+
+
+def do_difference(a, b):
+    diff = []
+    try:
+        other = set(b)
+        diff = [item for item in a if item not in other]
+    except TypeError:
+        # This happens for unhashable values,
+        # use a list instead and redo.
+        other = list(b)
+        diff = [item for item in a if item not in other]
+    return diff
+
+
+def lists_symmetric_difference(*args, **kwargs):
+    lists = args
+    flatten = kwargs.pop('flatten', False)
+
+    if kwargs:
+        # Some unused kwargs remain
+        raise AnsibleFilterError(
+            f"lists_symmetric_difference() got unexpected keywords arguments: {', '.join(kwargs.keys())}"
+        )
+
+    if flatten:
+        lists = flatten_list(args)
+
+    if not lists:
+        return []
+
+    if len(lists) == 1:
+        return lists[0]
+
+    a = lists[0]
+    for b in lists[1:]:
+        a = do_symmetric_difference(a, b)
+    return a
+
+
+def do_symmetric_difference(a, b):
+    sym_diff = []
+    union = lists_union(a, b)
+    try:
+        isect = set(a) & set(b)
+        sym_diff = [item for item in union if item not in isect]
+    except TypeError:
+        # This happens for unhashable values,
+        # build the intersection of `a` and `b` backed
+        # by a list instead of a set and redo.
+ isect = lists_intersect(a, b) + sym_diff = [item for item in union if item not in isect] + return sym_diff + + +class FilterModule(object): + ''' Ansible lists jinja2 filters ''' + + def filters(self): + return { + 'lists_union': lists_union, + 'lists_intersect': lists_intersect, + 'lists_difference': lists_difference, + 'lists_symmetric_difference': lists_symmetric_difference, + } diff --git a/plugins/filter/lists_difference.yml b/plugins/filter/lists_difference.yml new file mode 100644 index 0000000000..630e77cf0a --- /dev/null +++ b/plugins/filter/lists_difference.yml @@ -0,0 +1,48 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: lists_difference + short_description: Difference of lists with a predictive order + version_added: 8.4.0 + description: + - Provide a unique list of all the elements from the first which do not appear in the other lists. + - The order of the items in the resulting list is preserved. + options: + _input: + description: A list. + type: list + elements: any + required: true + flatten: + description: Whether to remove one hierarchy level from the input list. + type: boolean + default: false + author: + - Christoph Fiehe (@cfiehe) + +EXAMPLES: | + - name: Return the difference of list1 and list2. + ansible.builtin.debug: + msg: "{{ list1 | community.general.lists_difference(list2) }}" + vars: + list1: [1, 2, 5, 3, 4, 10] + list2: [1, 2, 3, 4, 5, 11, 99] + # => [10] + + - name: Return the difference of list1, list2 and list3. 
+ ansible.builtin.debug: + msg: "{{ [list1, list2, list3] | community.general.lists_difference(flatten=true) }}" + vars: + list1: [1, 2, 5, 3, 4, 10] + list2: [1, 2, 3, 4, 5, 11, 99] + list3: [1, 2, 3, 4, 5, 10, 99, 101] + # => [] + +RETURN: + _value: + description: A unique list of all the elements from the first list that do not appear on the other lists. + type: list + elements: any diff --git a/plugins/filter/lists_intersect.yml b/plugins/filter/lists_intersect.yml new file mode 100644 index 0000000000..d2ea9483b1 --- /dev/null +++ b/plugins/filter/lists_intersect.yml @@ -0,0 +1,48 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: lists_intersect + short_description: Intersection of lists with a predictive order + version_added: 8.4.0 + description: + - Provide a unique list of all the common elements of two or more lists. + - The order of the items in the resulting list is preserved. + options: + _input: + description: A list. + type: list + elements: any + required: true + flatten: + description: Whether to remove one hierarchy level from the input list. + type: boolean + default: false + author: + - Christoph Fiehe (@cfiehe) + +EXAMPLES: | + - name: Return the intersection of list1 and list2. + ansible.builtin.debug: + msg: "{{ list1 | community.general.lists_intersect(list2) }}" + vars: + list1: [1, 2, 5, 3, 4, 10] + list2: [1, 2, 3, 4, 5, 11, 99] + # => [1, 2, 5, 3, 4] + + - name: Return the intersection of list1, list2 and list3. + ansible.builtin.debug: + msg: "{{ [list1, list2, list3] | community.general.lists_intersect(flatten=true) }}" + vars: + list1: [1, 2, 5, 3, 4, 10] + list2: [1, 2, 3, 4, 5, 11, 99] + list3: [1, 2, 3, 4, 5, 10, 99, 101] + # => [1, 2, 5, 3, 4] + +RETURN: + _value: + description: A unique list of all the common elements from the provided lists. 
+ type: list + elements: any diff --git a/plugins/filter/lists_mergeby.py b/plugins/filter/lists_mergeby.py index a89039ed89..4b8bf971f4 100644 --- a/plugins/filter/lists_mergeby.py +++ b/plugins/filter/lists_mergeby.py @@ -1,122 +1,216 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2020-2022, Vladimir Botka +# Copyright (c) 2020-2024, Vladimir Botka # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: lists_mergeby - short_description: Merge two or more lists of dictionaries by a given attribute - version_added: 2.0.0 - author: Vladimir Botka (@vbotka) - description: - - Merge two or more lists by attribute I(index). Optional parameters 'recursive' and 'list_merge' - control the merging of the lists in values. The function merge_hash from ansible.utils.vars - is used. To learn details on how to use the parameters 'recursive' and 'list_merge' see - Ansible User's Guide chapter "Using filters to manipulate data" section "Combining - hashes/dictionaries". - positional: another_list, index - options: - _input: - description: A list of dictionaries. - type: list - elements: dictionary - required: true - another_list: - description: Another list of dictionaries. This parameter can be specified multiple times. - type: list - elements: dictionary - index: - description: - - The dictionary key that must be present in every dictionary in every list that is used to - merge the lists. - type: string - required: true - recursive: - description: - - Should the combine recursively merge nested dictionaries (hashes). - - "B(Note:) It does not depend on the value of the C(hash_behaviour) setting in C(ansible.cfg)." 
- type: boolean - default: false - list_merge: - description: - - Modifies the behaviour when the dictionaries (hashes) to merge contain arrays/lists. - type: string - default: replace - choices: - - replace - - keep - - append - - prepend - - append_rp - - prepend_rp -''' +DOCUMENTATION = r""" +name: lists_mergeby +short_description: Merge two or more lists of dictionaries by a given attribute +version_added: 2.0.0 +author: Vladimir Botka (@vbotka) +description: + - Merge two or more lists by attribute O(index). Optional parameters O(recursive) and O(list_merge) control the merging + of the nested dictionaries and lists. + - The function C(merge_hash) from C(ansible.utils.vars) is used. + - To learn details on how to use the parameters O(recursive) and O(list_merge) see Ansible User's Guide chapter "Using filters + to manipulate data" section R(Combining hashes/dictionaries, combine_filter) or the filter P(ansible.builtin.combine#filter). +positional: another_list, index +options: + _input: + description: + - A list of dictionaries, or a list of lists of dictionaries. + - The required type of the C(elements) is set to C(raw) because all elements of O(_input) can be either dictionaries + or lists. + type: list + elements: raw + required: true + another_list: + description: + - Another list of dictionaries, or a list of lists of dictionaries. + - This parameter can be specified multiple times. + type: list + elements: raw + index: + description: + - The dictionary key that must be present in every dictionary in every list that is used to merge the lists. + type: string + required: true + recursive: + description: + - Should the combine recursively merge nested dictionaries (hashes). + - B(Note:) It does not depend on the value of the C(hash_behaviour) setting in C(ansible.cfg). + type: boolean + default: false + list_merge: + description: + - Modifies the behaviour when the dictionaries (hashes) to merge contain arrays/lists. 
+ type: string + default: replace + choices: + - replace + - keep + - append + - prepend + - append_rp + - prepend_rp +""" -EXAMPLES = ''' -- name: Merge two lists +EXAMPLES = r""" +# Some results below are manually formatted for better readability. The +# dictionaries' keys will be sorted alphabetically in real output. + +- name: Example 1. Merge two lists. The results r1 and r2 are the same. ansible.builtin.debug: - msg: >- - {{ list1 | community.general.lists_mergeby( - list2, - 'index', - recursive=True, - list_merge='append' - ) }}" + msg: | + r1: {{ r1 }} + r2: {{ r2 }} vars: list1: - - index: a - value: 123 - - index: b - value: 42 + - {index: a, value: 123} + - {index: b, value: 4} list2: - - index: a - foo: bar - - index: c - foo: baz - # Produces the following list of dictionaries: - # { - # "index": "a", - # "foo": "bar", - # "value": 123 - # }, - # { - # "index": "b", - # "value": 42 - # }, - # { - # "index": "c", - # "foo": "baz" - # } -''' + - {index: a, foo: bar} + - {index: c, foo: baz} + r1: "{{ list1 | community.general.lists_mergeby(list2, 'index') }}" + r2: "{{ [list1, list2] | community.general.lists_mergeby('index') }}" -RETURN = ''' - _value: - description: The merged list. - type: list - elements: dictionary -''' +# r1: +# - {index: a, foo: bar, value: 123} +# - {index: b, value: 4} +# - {index: c, foo: baz} +# r2: +# - {index: a, foo: bar, value: 123} +# - {index: b, value: 4} +# - {index: c, foo: baz} + +- name: Example 2. Merge three lists + ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, value: 123} + - {index: b, value: 4} + list2: + - {index: a, foo: bar} + - {index: c, foo: baz} + list3: + - {index: d, foo: qux} + r: "{{ [list1, list2, list3] | community.general.lists_mergeby('index') }}" + +# r: +# - {index: a, foo: bar, value: 123} +# - {index: b, value: 4} +# - {index: c, foo: baz} +# - {index: d, foo: qux} + +- name: Example 3. Merge single list. The result is the same as 2. 
+ ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, value: 123} + - {index: b, value: 4} + - {index: a, foo: bar} + - {index: c, foo: baz} + - {index: d, foo: qux} + r: "{{ [list1, []] | community.general.lists_mergeby('index') }}" + +# r: +# - {index: a, foo: bar, value: 123} +# - {index: b, value: 4} +# - {index: c, foo: baz} +# - {index: d, foo: qux} + +- name: Example 4. Merge two lists. By default, replace nested lists. + ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, foo: [X1, X2]} + - {index: b, foo: [X1, X2]} + list2: + - {index: a, foo: [Y1, Y2]} + - {index: b, foo: [Y1, Y2]} + r: "{{ [list1, list2] | community.general.lists_mergeby('index') }}" + +# r: +# - {index: a, foo: [Y1, Y2]} +# - {index: b, foo: [Y1, Y2]} + +- name: Example 5. Merge two lists. Append nested lists. + ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, foo: [X1, X2]} + - {index: b, foo: [X1, X2]} + list2: + - {index: a, foo: [Y1, Y2]} + - {index: b, foo: [Y1, Y2]} + r: "{{ [list1, list2] | community.general.lists_mergeby('index', list_merge='append') }}" + +# r: +# - {index: a, foo: [X1, X2, Y1, Y2]} +# - {index: b, foo: [X1, X2, Y1, Y2]} + +- name: Example 6. Merge two lists. By default, do not merge nested dictionaries. + ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, foo: {x: 1, y: 2}} + - {index: b, foo: [X1, X2]} + list2: + - {index: a, foo: {y: 3, z: 4}} + - {index: b, foo: [Y1, Y2]} + r: "{{ [list1, list2] | community.general.lists_mergeby('index') }}" + +# r: +# - {index: a, foo: {y: 3, z: 4}} +# - {index: b, foo: [Y1, Y2]} + +- name: Example 7. Merge two lists. Merge nested dictionaries too. 
+ ansible.builtin.debug: + var: r + vars: + list1: + - {index: a, foo: {x: 1, y: 2}} + - {index: b, foo: [X1, X2]} + list2: + - {index: a, foo: {y: 3, z: 4}} + - {index: b, foo: [Y1, Y2]} + r: "{{ [list1, list2] | community.general.lists_mergeby('index', recursive=true) }}" + +# r: +# - {index: a, foo: {x:1, y: 3, z: 4}} +# - {index: b, foo: [Y1, Y2]} +""" + +RETURN = r""" +_value: + description: The merged list. + type: list + elements: dictionary +""" from ansible.errors import AnsibleFilterError -from ansible.module_utils.six import string_types -from ansible.module_utils.common._collections_compat import Mapping, Sequence +from collections.abc import Mapping, Sequence from ansible.utils.vars import merge_hash -from ansible.release import __version__ as ansible_version -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion from collections import defaultdict from operator import itemgetter def list_mergeby(x, y, index, recursive=False, list_merge='replace'): - ''' Merge 2 lists by attribute 'index'. The function merge_hash from ansible.utils.vars is used. - This function is used by the function lists_mergeby. + '''Merge 2 lists by attribute 'index'. The function 'merge_hash' + from ansible.utils.vars is used. This function is used by the + function lists_mergeby. ''' d = defaultdict(dict) - for l in (x, y): - for elem in l: + for lst in (x, y): + for elem in lst: if not isinstance(elem, Mapping): msg = "Elements of list arguments for lists_mergeby must be dictionaries. %s is %s" raise AnsibleFilterError(msg % (elem, type(elem))) @@ -126,20 +220,9 @@ def list_mergeby(x, y, index, recursive=False, list_merge='replace'): def lists_mergeby(*terms, **kwargs): - ''' Merge 2 or more lists by attribute 'index'. Optional parameters 'recursive' and 'list_merge' - control the merging of the lists in values. The function merge_hash from ansible.utils.vars - is used. 
To learn details on how to use the parameters 'recursive' and 'list_merge' see - Ansible User's Guide chapter "Using filters to manipulate data" section "Combining - hashes/dictionaries". - - Example: - - debug: - msg: "{{ list1| - community.general.lists_mergeby(list2, - 'index', - recursive=True, - list_merge='append')| - list }}" + '''Merge 2 or more lists by attribute 'index'. To learn details + on how to use the parameters 'recursive' and 'list_merge' see + the filter ansible.builtin.combine. ''' recursive = kwargs.pop('recursive', False) @@ -157,7 +240,7 @@ def lists_mergeby(*terms, **kwargs): "must be lists. %s is %s") raise AnsibleFilterError(msg % (sublist, type(sublist))) if len(sublist) > 0: - if all(isinstance(l, Sequence) for l in sublist): + if all(isinstance(lst, Sequence) for lst in sublist): for item in sublist: flat_list.append(item) else: @@ -172,7 +255,7 @@ def lists_mergeby(*terms, **kwargs): index = terms[-1] - if not isinstance(index, string_types): + if not isinstance(index, str): msg = ("First argument after the lists for community.general.lists_mergeby must be string. " "%s is %s") raise AnsibleFilterError(msg % (index, type(index))) diff --git a/plugins/filter/lists_symmetric_difference.yml b/plugins/filter/lists_symmetric_difference.yml new file mode 100644 index 0000000000..abd8caab8a --- /dev/null +++ b/plugins/filter/lists_symmetric_difference.yml @@ -0,0 +1,48 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: lists_symmetric_difference + short_description: Symmetric Difference of lists with a predictive order + version_added: 8.4.0 + description: + - Provide a unique list containing the symmetric difference of two or more lists. + - The order of the items in the resulting list is preserved. + options: + _input: + description: A list. 
+ type: list + elements: any + required: true + flatten: + description: Whether to remove one hierarchy level from the input list. + type: boolean + default: false + author: + - Christoph Fiehe (@cfiehe) + +EXAMPLES: | + - name: Return the symmetric difference of list1 and list2. + ansible.builtin.debug: + msg: "{{ list1 | community.general.lists_symmetric_difference(list2) }}" + vars: + list1: [1, 2, 5, 3, 4, 10] + list2: [1, 2, 3, 4, 5, 11, 99] + # => [10, 11, 99] + + - name: Return the symmetric difference of list1, list2 and list3. + ansible.builtin.debug: + msg: "{{ [list1, list2, list3] | community.general.lists_symmetric_difference(flatten=true) }}" + vars: + list1: [1, 2, 5, 3, 4, 10] + list2: [1, 2, 3, 4, 5, 11, 99] + list3: [1, 2, 3, 4, 5, 10, 99, 101] + # => [11, 1, 2, 3, 4, 5, 101] + +RETURN: + _value: + description: A unique list containing the symmetric difference of two or more lists. + type: list + elements: any diff --git a/plugins/filter/lists_union.yml b/plugins/filter/lists_union.yml new file mode 100644 index 0000000000..8c1ffb4f87 --- /dev/null +++ b/plugins/filter/lists_union.yml @@ -0,0 +1,48 @@ +--- +# Copyright (c) Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: lists_union + short_description: Union of lists with a predictive order + version_added: 8.4.0 + description: + - Provide a unique list of all the elements of two or more lists. + - The order of the items in the resulting list is preserved. + options: + _input: + description: A list. + type: list + elements: any + required: true + flatten: + description: Whether to remove one hierarchy level from the input list. + type: boolean + default: false + author: + - Christoph Fiehe (@cfiehe) + +EXAMPLES: | + - name: Return the union of list1, list2 and list3. 
+ ansible.builtin.debug: + msg: "{{ list1 | community.general.lists_union(list2, list3) }}" + vars: + list1: [1, 2, 5, 3, 4, 10] + list2: [1, 2, 3, 4, 5, 11, 99] + list3: [1, 2, 3, 4, 5, 10, 99, 101] + # => [1, 2, 5, 3, 4, 10, 11, 99, 101] + + - name: Return the union of list1 and list2. + ansible.builtin.debug: + msg: "{{ [list1, list2] | community.general.lists_union(flatten=true) }}" + vars: + list1: [1, 2, 5, 3, 4, 10] + list2: [1, 2, 3, 4, 5, 11, 99] + # => [1, 2, 5, 3, 4, 10, 11, 99] + +RETURN: + _value: + description: A unique list of all the elements from the provided lists. + type: list + elements: any diff --git a/plugins/filter/random_mac.py b/plugins/filter/random_mac.py index 662c62b07c..e5e6201f1c 100644 --- a/plugins/filter/random_mac.py +++ b/plugins/filter/random_mac.py @@ -1,31 +1,29 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2020 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later # Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: random_mac - short_description: Generate a random MAC address - description: - - Generates random networking interfaces MAC addresses for a given prefix. - options: - _input: - description: A string prefix to use as a basis for the random MAC generated. - type: string - required: true - seed: - description: - - A randomization seed to initialize the process, used to get repeatable results. - - If no seed is provided, a system random source such as C(/dev/urandom) is used. - required: false - type: string -''' +DOCUMENTATION = r""" +name: random_mac +short_description: Generate a random MAC address +description: + - Generates random networking interfaces MAC addresses for a given prefix. 
+options: + _input: + description: A string prefix to use as a basis for the random MAC generated. + type: string + required: true + seed: + description: + - A randomization seed to initialize the process, used to get repeatable results. + - If no seed is provided, a system random source such as C(/dev/urandom) is used. + required: false + type: string +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Random MAC given a prefix ansible.builtin.debug: msg: "{{ '52:54:00' | community.general.random_mac }}" @@ -34,35 +32,32 @@ EXAMPLES = ''' - name: With a seed ansible.builtin.debug: msg: "{{ '52:54:00' | community.general.random_mac(seed=inventory_hostname) }}" -''' +""" -RETURN = ''' - _value: - description: The generated MAC. - type: string -''' +RETURN = r""" +_value: + description: The generated MAC. + type: string +""" import re from random import Random, SystemRandom from ansible.errors import AnsibleFilterError -from ansible.module_utils.six import string_types def random_mac(value, seed=None): ''' takes string prefix, and return it completed with random bytes to get a complete 6 bytes MAC address ''' - if not isinstance(value, string_types): - raise AnsibleFilterError('Invalid value type (%s) for random_mac (%s)' % - (type(value), value)) + if not isinstance(value, str): + raise AnsibleFilterError(f'Invalid value type ({type(value)}) for random_mac ({value})') value = value.lower() mac_items = value.split(':') if len(mac_items) > 5: - raise AnsibleFilterError('Invalid value (%s) for random_mac: 5 colon(:) separated' - ' items max' % value) + raise AnsibleFilterError(f'Invalid value ({value}) for random_mac: 5 colon(:) separated items max') err = "" for mac in mac_items: @@ -70,11 +65,11 @@ def random_mac(value, seed=None): err += ",empty item" continue if not re.match('[a-f0-9]{2}', mac): - err += ",%s not hexa byte" % mac + err += f",{mac} not hexa byte" err = err.strip(',') if err: - raise AnsibleFilterError('Invalid value (%s) for random_mac: %s' % (value, 
err)) + raise AnsibleFilterError(f'Invalid value ({value}) for random_mac: {err}') if seed is None: r = SystemRandom() @@ -84,7 +79,7 @@ def random_mac(value, seed=None): v = r.randint(68719476736, 1099511627775) # Select first n chars to complement input prefix remain = 2 * (6 - len(mac_items)) - rnd = ('%x' % v)[:remain] + rnd = f'{v:x}'[:remain] return value + re.sub(r'(..)', r':\1', rnd) diff --git a/plugins/filter/remove_keys.py b/plugins/filter/remove_keys.py new file mode 100644 index 0000000000..fc134b41d0 --- /dev/null +++ b/plugins/filter/remove_keys.py @@ -0,0 +1,136 @@ +# Copyright (c) 2024 Vladimir Botka +# Copyright (c) 2024 Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: remove_keys +short_description: Remove specific keys from dictionaries in a list +version_added: "9.1.0" +author: + - Vladimir Botka (@vbotka) + - Felix Fontein (@felixfontein) +description: This filter removes only specified keys from a provided list of dictionaries. +options: + _input: + description: + - A list of dictionaries. + - Top level keys must be strings. + type: list + elements: dictionary + required: true + target: + description: + - A single key or key pattern to remove, or a list of keys or keys patterns to remove. + - If O(matching_parameter=regex) there must be exactly one pattern provided. + type: raw + required: true + matching_parameter: + description: Specify the matching option of target keys. + type: str + default: equal + choices: + equal: Matches keys of exactly one of the O(target) items. + starts_with: Matches keys that start with one of the O(target) items. + ends_with: Matches keys that end with one of the O(target) items. + regex: + - Matches keys that match the regular expresion provided in O(target). 
+ - In this case, O(target) must be a regex string or a list with single regex string. +""" + +EXAMPLES = r""" +- l: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + + # 1) By default match keys that equal any of the items in the target. +- t: [k0_x0, k1_x1] + r: "{{ l | community.general.remove_keys(target=t) }}" + + # 2) Match keys that start with any of the items in the target. +- t: [k0, k1] + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='starts_with') }}" + + # 3) Match keys that end with any of the items in target. +- t: [x0, x1] + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='ends_with') }}" + + # 4) Match keys by the regex. +- t: ['^.*[01]_x.*$'] + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='regex') }}" + + # 5) Match keys by the regex. +- t: '^.*[01]_x.*$' + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='regex') }}" + + # The results of above examples 1-5 are all the same. +- r: + - {k2_x2: [C0], k3_x3: foo} + - {k2_x2: [C1], k3_x3: bar} + + # 6) By default match keys that equal the target. +- t: k0_x0 + r: "{{ l | community.general.remove_keys(target=t) }}" + + # 7) Match keys that start with the target. +- t: k0 + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='starts_with') }}" + + # 8) Match keys that end with the target. +- t: x0 + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='ends_with') }}" + + # 9) Match keys by the regex. +- t: '^.*0_x.*$' + r: "{{ l | community.general.remove_keys(target=t, matching_parameter='regex') }}" + + # The results of above examples 6-9 are all the same. +- r: + - {k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k1_x1: B1, k2_x2: [C1], k3_x3: bar} +""" + +RETURN = r""" +_value: + description: The list of dictionaries with selected keys removed. 
+ type: list + elements: dictionary +""" + +from ansible_collections.community.general.plugins.plugin_utils.keys_filter import ( + _keys_filter_params, + _keys_filter_target_str) + + +def remove_keys(data, target=None, matching_parameter='equal'): + """remove specific keys from dictionaries in a list""" + + # test parameters + _keys_filter_params(data, matching_parameter) + # test and transform target + tt = _keys_filter_target_str(target, matching_parameter) + + if matching_parameter == 'equal': + def keep_key(key): + return key not in tt + elif matching_parameter == 'starts_with': + def keep_key(key): + return not key.startswith(tt) + elif matching_parameter == 'ends_with': + def keep_key(key): + return not key.endswith(tt) + elif matching_parameter == 'regex': + def keep_key(key): + return tt.match(key) is None + + return [{k: v for k, v in d.items() if keep_key(k)} for d in data] + + +class FilterModule(object): + + def filters(self): + return { + 'remove_keys': remove_keys, + } diff --git a/plugins/filter/replace_keys.py b/plugins/filter/replace_keys.py new file mode 100644 index 0000000000..5af0b22f62 --- /dev/null +++ b/plugins/filter/replace_keys.py @@ -0,0 +1,178 @@ +# Copyright (c) 2024 Vladimir Botka +# Copyright (c) 2024 Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: replace_keys +short_description: Replace specific keys in a list of dictionaries +version_added: "9.1.0" +author: + - Vladimir Botka (@vbotka) + - Felix Fontein (@felixfontein) +description: This filter replaces specified keys in a provided list of dictionaries. +options: + _input: + description: + - A list of dictionaries. + - Top level keys must be strings. 
+ type: list + elements: dictionary + required: true + target: + description: + - A list of dictionaries with attributes C(before) and C(after). + - The value of O(target[].after) replaces key matching O(target[].before). + type: list + elements: dictionary + required: true + suboptions: + before: + description: + - A key or key pattern to change. + - The interpretation of O(target[].before) depends on O(matching_parameter). + - For a key that matches multiple O(target[].before)s, the B(first) matching O(target[].after) is used. + type: str + after: + description: A matching key change to. + type: str + matching_parameter: + description: Specify the matching option of target keys. + type: str + default: equal + choices: + equal: Matches keys of exactly one of the O(target[].before) items. + starts_with: Matches keys that start with one of the O(target[].before) items. + ends_with: Matches keys that end with one of the O(target[].before) items. + regex: Matches keys that match one of the regular expressions provided in O(target[].before). +""" + +EXAMPLES = r""" +- l: + - {k0_x0: A0, k1_x1: B0, k2_x2: [C0], k3_x3: foo} + - {k0_x0: A1, k1_x1: B1, k2_x2: [C1], k3_x3: bar} + + # 1) By default, replace keys that are equal any of the attributes before. +- t: + - {before: k0_x0, after: a0} + - {before: k1_x1, after: a1} + r: "{{ l | community.general.replace_keys(target=t) }}" + + # 2) Replace keys that starts with any of the attributes before. +- t: + - {before: k0, after: a0} + - {before: k1, after: a1} + r: "{{ l | community.general.replace_keys(target=t, matching_parameter='starts_with') }}" + + # 3) Replace keys that ends with any of the attributes before. +- t: + - {before: x0, after: a0} + - {before: x1, after: a1} + r: "{{ l | community.general.replace_keys(target=t, matching_parameter='ends_with') }}" + + # 4) Replace keys that match any regex of the attributes before. 
+- t: + - {before: "^.*0_x.*$", after: a0} + - {before: "^.*1_x.*$", after: a1} + r: "{{ l | community.general.replace_keys(target=t, matching_parameter='regex') }}" + + # The results of above examples 1-4 are all the same. +- r: + - {a0: A0, a1: B0, k2_x2: [C0], k3_x3: foo} + - {a0: A1, a1: B1, k2_x2: [C1], k3_x3: bar} + + # 5) If more keys match the same attribute before the last one will be used. +- t: + - {before: "^.*_x.*$", after: X} + r: "{{ l | community.general.replace_keys(target=t, matching_parameter='regex') }}" + + # gives + +- r: + - X: foo + - X: bar + + # 6) If there are items with equal attribute before the first one will be used. +- t: + - {before: "^.*_x.*$", after: X} + - {before: "^.*_x.*$", after: Y} + r: "{{ l | community.general.replace_keys(target=t, matching_parameter='regex') }}" + + # gives + +- r: + - X: foo + - X: bar + + # 7) If there are more matches for a key the first one will be used. +- l: + - {aaa1: A, bbb1: B, ccc1: C} + - {aaa2: D, bbb2: E, ccc2: F} +- t: + - {before: a, after: X} + - {before: aa, after: Y} + r: "{{ l | community.general.replace_keys(target=t, matching_parameter='starts_with') }}" + + # gives + +- r: + - {X: A, bbb1: B, ccc1: C} + - {X: D, bbb2: E, ccc2: F} +""" + +RETURN = r""" +_value: + description: The list of dictionaries with replaced keys. 
+ type: list + elements: dictionary +""" + +from ansible_collections.community.general.plugins.plugin_utils.keys_filter import ( + _keys_filter_params, + _keys_filter_target_dict) + + +def replace_keys(data, target=None, matching_parameter='equal'): + """replace specific keys in a list of dictionaries""" + + # test parameters + _keys_filter_params(data, matching_parameter) + # test and transform target + tz = _keys_filter_target_dict(target, matching_parameter) + + if matching_parameter == 'equal': + def replace_key(key): + for b, a in tz: + if key == b: + return a + return key + elif matching_parameter == 'starts_with': + def replace_key(key): + for b, a in tz: + if key.startswith(b): + return a + return key + elif matching_parameter == 'ends_with': + def replace_key(key): + for b, a in tz: + if key.endswith(b): + return a + return key + elif matching_parameter == 'regex': + def replace_key(key): + for b, a in tz: + if b.match(key): + return a + return key + + return [{replace_key(k): v for k, v in d.items()} for d in data] + + +class FilterModule(object): + + def filters(self): + return { + 'replace_keys': replace_keys, + } diff --git a/plugins/filter/reveal_ansible_type.py b/plugins/filter/reveal_ansible_type.py new file mode 100644 index 0000000000..e068702355 --- /dev/null +++ b/plugins/filter/reveal_ansible_type.py @@ -0,0 +1,147 @@ +# Copyright (c) 2024 Vladimir Botka +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: reveal_ansible_type +short_description: Return input type +version_added: "9.2.0" +author: Vladimir Botka (@vbotka) +description: This filter returns input type. +options: + _input: + description: Input data. + type: raw + required: true + alias: + description: Data type aliases. 
+ default: {} + type: dictionary +""" + +EXAMPLES = r""" +# Substitution converts str to AnsibleUnicode or _AnsibleTaggedStr +# ---------------------------------------------------------------- + +# String. AnsibleUnicode or _AnsibleTaggedStr. +- data: "abc" + result: '{{ data | community.general.reveal_ansible_type }}' +# result => AnsibleUnicode (or _AnsibleTaggedStr) + +# String. AnsibleUnicode/_AnsibleTaggedStr alias str. +- alias: {"AnsibleUnicode": "str", "_AnsibleTaggedStr": "str"} + data: "abc" + result: '{{ data | community.general.reveal_ansible_type(alias) }}' +# result => str + +# List. All items are AnsibleUnicode/_AnsibleTaggedStr. +- data: ["a", "b", "c"] + result: '{{ data | community.general.reveal_ansible_type }}' +# result => list[AnsibleUnicode] or list[_AnsibleTaggedStr] + +# Dictionary. All keys and values are AnsibleUnicode/_AnsibleTaggedStr. +- data: {"a": "foo", "b": "bar", "c": "baz"} + result: '{{ data | community.general.reveal_ansible_type }}' +# result => dict[AnsibleUnicode, AnsibleUnicode] or dict[_AnsibleTaggedStr, _AnsibleTaggedStr] + +# No substitution and no alias. Type of strings is str +# ---------------------------------------------------- + +# String +- result: '{{ "abc" | community.general.reveal_ansible_type }}' +# result => str + +# Integer +- result: '{{ 123 | community.general.reveal_ansible_type }}' +# result => int + +# Float +- result: '{{ 123.45 | community.general.reveal_ansible_type }}' +# result => float + +# Boolean +- result: '{{ true | community.general.reveal_ansible_type }}' +# result => bool + +# List. All items are strings. +- result: '{{ ["a", "b", "c"] | community.general.reveal_ansible_type }}' +# result => list[str] + +# List of dictionaries. +- result: '{{ [{"a": 1}, {"b": 2}] | community.general.reveal_ansible_type }}' +# result => list[dict] + +# Dictionary. All keys are strings. All values are integers. 
+- result: '{{ {"a": 1} | community.general.reveal_ansible_type }}' +# result => dict[str, int] + +# Dictionary. All keys are strings. All values are integers. +- result: '{{ {"a": 1, "b": 2} | community.general.reveal_ansible_type }}' +# result => dict[str, int] + +# Type of strings is AnsibleUnicode, _AnsibleTaggedStr, or str +# ------------------------------------------------------------ + +# Dictionary. The keys are integers or strings. All values are strings. +- alias: + AnsibleUnicode: str + _AnsibleTaggedStr: str + _AnsibleTaggedInt: int + data: {1: 'a', 'b': 'b'} + result: '{{ data | community.general.reveal_ansible_type(alias) }}' +# result => dict[int|str, str] + +# Dictionary. All keys are integers. All values are keys. +- alias: + AnsibleUnicode: str + _AnsibleTaggedStr: str + _AnsibleTaggedInt: int + data: {1: 'a', 2: 'b'} + result: '{{ data | community.general.reveal_ansible_type(alias) }}' +# result => dict[int, str] + +# Dictionary. All keys are strings. Multiple types values. +- alias: + AnsibleUnicode: str + _AnsibleTaggedStr: str + _AnsibleTaggedInt: int + _AnsibleTaggedFloat: float + data: {'a': 1, 'b': 1.1, 'c': 'abc', 'd': true, 'e': ['x', 'y', 'z'], 'f': {'x': 1, 'y': 2}} + result: '{{ data | community.general.reveal_ansible_type(alias) }}' +# result => dict[str, bool|dict|float|int|list|str] + +# List. Multiple types items. +- alias: + AnsibleUnicode: str + _AnsibleTaggedStr: str + _AnsibleTaggedInt: int + _AnsibleTaggedFloat: float + data: [1, 2, 1.1, 'abc', true, ['x', 'y', 'z'], {'x': 1, 'y': 2}] + result: '{{ data | community.general.reveal_ansible_type(alias) }}' +# result => list[bool|dict|float|int|list|str] +""" + +RETURN = r""" +_value: + description: Type of the data. 
+ type: str +""" + +from ansible_collections.community.general.plugins.plugin_utils.ansible_type import _ansible_type + + +def reveal_ansible_type(data, alias=None): + """Returns data type""" + + # TODO: expose use_native_type parameter + return _ansible_type(data, alias) + + +class FilterModule(object): + + def filters(self): + return { + 'reveal_ansible_type': reveal_ansible_type + } diff --git a/plugins/filter/time.py b/plugins/filter/time.py index 25970cd260..e48e24216a 100644 --- a/plugins/filter/time.py +++ b/plugins/filter/time.py @@ -1,10 +1,8 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2020, René Moser # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re from ansible.errors import AnsibleFilterError @@ -57,10 +55,10 @@ def to_time_unit(human_time, unit='ms', **kwargs): unit = unit_to_short_form.get(unit.rstrip('s'), unit) if unit not in unit_factors: - raise AnsibleFilterError("to_time_unit() can not convert to the following unit: %s. " - "Available units (singular or plural): %s. " - "Available short units: %s" - % (unit, ', '.join(unit_to_short_form.keys()), ', '.join(unit_factors.keys()))) + raise AnsibleFilterError(( + f"to_time_unit() can not convert to the following unit: {unit}. Available units (singular or plural):" + f"{', '.join(unit_to_short_form.keys())}. 
Available short units: {', '.join(unit_factors.keys())}" + )) if 'year' in kwargs: unit_factors['y'] = unit_factors['y'][:-1] + [kwargs.pop('year')] @@ -68,14 +66,14 @@ def to_time_unit(human_time, unit='ms', **kwargs): unit_factors['mo'] = unit_factors['mo'][:-1] + [kwargs.pop('month')] if kwargs: - raise AnsibleFilterError('to_time_unit() got unknown keyword arguments: %s' % ', '.join(kwargs.keys())) + raise AnsibleFilterError(f"to_time_unit() got unknown keyword arguments: {', '.join(kwargs.keys())}") result = 0 for h_time_string in human_time.split(): res = re.match(r'(-?\d+)(\w+)', h_time_string) if not res: raise AnsibleFilterError( - "to_time_unit() can not interpret following string: %s" % human_time) + f"to_time_unit() can not interpret following string: {human_time}") h_time_int = int(res.group(1)) h_time_unit = res.group(2) @@ -83,7 +81,7 @@ def to_time_unit(human_time, unit='ms', **kwargs): h_time_unit = unit_to_short_form.get(h_time_unit.rstrip('s'), h_time_unit) if h_time_unit not in unit_factors: raise AnsibleFilterError( - "to_time_unit() can not interpret following string: %s" % human_time) + f"to_time_unit() can not interpret following string: {human_time}") time_in_milliseconds = h_time_int * multiply(unit_factors[h_time_unit]) result += time_in_milliseconds diff --git a/plugins/filter/to_days.yml b/plugins/filter/to_days.yml index 19bc8faf23..c76697f1ee 100644 --- a/plugins/filter/to_days.yml +++ b/plugins/filter/to_days.yml @@ -5,7 +5,7 @@ DOCUMENTATION: name: to_days - short_description: Converte a duration string to days + short_description: Converts a duration string to days version_added: 0.2.0 description: - Parse a human readable time duration string and convert to days. @@ -13,12 +13,12 @@ DOCUMENTATION: _input: description: - The time string to convert. 
- - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week, - C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec) - and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s) - can be added to a unit as well, so C(seconds) is the same as C(second). + - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week, + V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec) + and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s) + can be added to a unit as well, so V(seconds) is the same as V(second). - Valid strings are space separated combinations of an integer with an optional minus sign and a unit. - - Examples are C(1h), C(-5m), and C(3h -5m 6s). + - Examples are V(1h), V(-5m), and V(3h -5m 6s). type: string required: true year: diff --git a/plugins/filter/to_hours.yml b/plugins/filter/to_hours.yml index 83826a5908..520740897b 100644 --- a/plugins/filter/to_hours.yml +++ b/plugins/filter/to_hours.yml @@ -5,7 +5,7 @@ DOCUMENTATION: name: to_hours - short_description: Converte a duration string to hours + short_description: Converts a duration string to hours version_added: 0.2.0 description: - Parse a human readable time duration string and convert to hours. @@ -13,12 +13,12 @@ DOCUMENTATION: _input: description: - The time string to convert. - - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week, - C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec) - and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s) - can be added to a unit as well, so C(seconds) is the same as C(second). 
+ - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week, + V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec) + and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s) + can be added to a unit as well, so V(seconds) is the same as V(second). - Valid strings are space separated combinations of an integer with an optional minus sign and a unit. - - Examples are C(1h), C(-5m), and C(3h -5m 6s). + - Examples are V(1h), V(-5m), and V(3h -5m 6s). type: string required: true year: diff --git a/plugins/filter/to_ini.py b/plugins/filter/to_ini.py new file mode 100644 index 0000000000..a70740b8aa --- /dev/null +++ b/plugins/filter/to_ini.py @@ -0,0 +1,100 @@ + +# Copyright (c) 2023, Steffen Scheib +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: to_ini +short_description: Converts a dictionary to the INI file format +version_added: 8.2.0 +author: Steffen Scheib (@sscheib) +description: + - Converts a dictionary to the INI file format. +options: + _input: + description: The dictionary that should be converted to the INI format. + type: dictionary + required: true +""" + +EXAMPLES = r""" +- name: Define a dictionary + ansible.builtin.set_fact: + my_dict: + section_name: + key_name: 'key value' + + another_section: + connection: 'ssh' + +- name: Write dictionary to INI file + ansible.builtin.copy: + dest: /tmp/test.ini + content: '{{ my_dict | community.general.to_ini }}' + + # /tmp/test.ini will look like this: + # [section_name] + # key_name = key value + # + # [another_section] + # connection = ssh +""" + +RETURN = r""" +_value: + description: A string formatted as INI file. 
+ type: string +""" + +from collections.abc import Mapping +from configparser import ConfigParser +from io import StringIO +from ansible.errors import AnsibleFilterError + + +class IniParser(ConfigParser): + ''' Implements a configparser which sets the correct optionxform ''' + + def __init__(self): + super().__init__(interpolation=None) + self.optionxform = str + + +def to_ini(obj): + ''' Read the given dict and return an INI formatted string ''' + + if not isinstance(obj, Mapping): + raise AnsibleFilterError(f'to_ini requires a dict, got {type(obj)}') + + ini_parser = IniParser() + + try: + ini_parser.read_dict(obj) + except Exception as ex: + raise AnsibleFilterError('to_ini failed to parse given dict:' + f'{ex}', orig_exc=ex) + + # catching empty dicts + if obj == dict(): + raise AnsibleFilterError('to_ini received an empty dict. ' + 'An empty dict cannot be converted.') + + config = StringIO() + ini_parser.write(config) + + # config.getvalue() returns two \n at the end + # with the below insanity, we remove the very last character of + # the resulting string + return ''.join(config.getvalue().rsplit(config.getvalue()[-1], 1)) + + +class FilterModule(object): + ''' Query filter ''' + + def filters(self): + + return { + 'to_ini': to_ini + } diff --git a/plugins/filter/to_milliseconds.yml b/plugins/filter/to_milliseconds.yml index b6bb7e4be0..f25bd86623 100644 --- a/plugins/filter/to_milliseconds.yml +++ b/plugins/filter/to_milliseconds.yml @@ -5,7 +5,7 @@ DOCUMENTATION: name: to_milliseconds - short_description: Converte a duration string to milliseconds + short_description: Converts a duration string to milliseconds version_added: 0.2.0 description: - Parse a human readable time duration string and convert to milliseconds. @@ -13,12 +13,12 @@ DOCUMENTATION: _input: description: - The time string to convert. 
- - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week, - C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec) - and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s) - can be added to a unit as well, so C(seconds) is the same as C(second). + - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week, + V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec) + and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s) + can be added to a unit as well, so V(seconds) is the same as V(second). - Valid strings are space separated combinations of an integer with an optional minus sign and a unit. - - Examples are C(1h), C(-5m), and C(3h -5m 6s). + - Examples are V(1h), V(-5m), and V(3h -5m 6s). type: string required: true year: diff --git a/plugins/filter/to_minutes.yml b/plugins/filter/to_minutes.yml index 3b85dadc43..924fb6feb3 100644 --- a/plugins/filter/to_minutes.yml +++ b/plugins/filter/to_minutes.yml @@ -5,7 +5,7 @@ DOCUMENTATION: name: to_minutes - short_description: Converte a duration string to minutes + short_description: Converts a duration string to minutes version_added: 0.2.0 description: - Parse a human readable time duration string and convert to minutes. @@ -13,12 +13,12 @@ DOCUMENTATION: _input: description: - The time string to convert. - - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week, - C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec) - and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s) - can be added to a unit as well, so C(seconds) is the same as C(second). 
+ - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week, + V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec) + and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s) + can be added to a unit as well, so V(seconds) is the same as V(second). - Valid strings are space separated combinations of an integer with an optional minus sign and a unit. - - Examples are C(1h), C(-5m), and C(3h -5m 6s). + - Examples are V(1h), V(-5m), and V(3h -5m 6s). type: string required: true year: diff --git a/plugins/filter/to_months.yml b/plugins/filter/to_months.yml index f13cee918e..09e9c38b5d 100644 --- a/plugins/filter/to_months.yml +++ b/plugins/filter/to_months.yml @@ -5,7 +5,7 @@ DOCUMENTATION: name: to_months - short_description: Converte a duration string to months + short_description: Convert a duration string to months version_added: 0.2.0 description: - Parse a human readable time duration string and convert to months. @@ -13,12 +13,12 @@ DOCUMENTATION: _input: description: - The time string to convert. - - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week, - C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec) - and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s) - can be added to a unit as well, so C(seconds) is the same as C(second). + - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week, + V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec) + and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s) + can be added to a unit as well, so V(seconds) is the same as V(second). 
- Valid strings are space separated combinations of an integer with an optional minus sign and a unit. - - Examples are C(1h), C(-5m), and C(3h -5m 6s). + - Examples are V(1h), V(-5m), and V(3h -5m 6s). type: string required: true year: diff --git a/plugins/filter/to_nice_yaml.yml b/plugins/filter/to_nice_yaml.yml new file mode 100644 index 0000000000..fe7a316f46 --- /dev/null +++ b/plugins/filter/to_nice_yaml.yml @@ -0,0 +1,89 @@ +# Copyright (c) Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: to_nice_yaml + author: + - Ansible Core Team + - Felix Fontein (@felixfontein) + version_added: 11.3.0 + short_description: Convert variable to YAML string + description: + - Converts an Ansible variable into a YAML string representation, without preserving vaulted strings as P(ansible.builtin.to_yaml#filter). + - This filter functions as a wrapper to the L(Python PyYAML library, https://pypi.org/project/PyYAML/)'s C(yaml.dump) function. + positional: _input + options: + _input: + description: + - A variable or expression that returns a data structure. + type: raw + required: true + indent: + description: + - Number of spaces to indent Python structures, mainly used for display to humans. + type: integer + default: 2 + sort_keys: + description: + - Affects sorting of dictionary keys. + default: true + type: bool + default_style: + description: + - Indicates the style of the scalar. + choices: + - '' + - "'" + - '"' + - '|' + - '>' + type: string + canonical: + description: + - If set to V(true), export tag type to the output. + type: bool + width: + description: + - Set the preferred line width. + type: integer + line_break: + description: + - Specify the line break. + type: string + encoding: + description: + - Specify the output encoding. 
+ type: string + explicit_start: + description: + - If set to V(true), adds an explicit start using C(---). + type: bool + explicit_end: + description: + - If set to V(true), adds an explicit end using C(...). + type: bool + redact_sensitive_values: + description: + - If set to V(true), vaulted strings are replaced by V() instead of being decrypted. + - With future ansible-core versions, this can extend to other strings tagged as sensitive. + - B(Note) that with ansible-core 2.18 and before this might not yield the expected result + since these versions of ansible-core strip the vault information away from strings that are + part of more complex data structures specified in C(vars). + type: bool + default: false + notes: + - More options may be available, see L(PyYAML documentation, https://pyyaml.org/wiki/PyYAMLDocumentation) for details. + - >- + These parameters to C(yaml.dump) are not accepted, as they are overridden internally: O(ignore:allow_unicode). + +EXAMPLES: | + --- + # Dump variable in a template to create a YAML document + value: "{{ github_workflow | community.general.to_nice_yaml }}" + +RETURN: + _value: + description: + - The YAML serialized string representing the variable structure inputted. + type: string diff --git a/plugins/filter/to_prettytable.py b/plugins/filter/to_prettytable.py new file mode 100644 index 0000000000..266a426cf2 --- /dev/null +++ b/plugins/filter/to_prettytable.py @@ -0,0 +1,409 @@ +# Copyright (c) 2025, Timur Gadiev +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: to_prettytable +short_description: Format a list of dictionaries as an ASCII table +version_added: "10.7.0" +author: Timur Gadiev (@tgadiev) +description: + - This filter takes a list of dictionaries and formats it as an ASCII table using the I(prettytable) Python library. 
+requirements: + - prettytable +options: + _input: + description: A list of dictionaries to format. + type: list + elements: dictionary + required: true + column_order: + description: List of column names to specify the order of columns in the table. + type: list + elements: string + header_names: + description: List of custom header names to use instead of dictionary keys. + type: list + elements: string + column_alignments: + description: + - Dictionary where keys are column names and values are alignment settings. Valid alignment values are C(left), C(center), + C(right), C(l), C(c), or C(r). + - "For example, V({'name': 'left', 'id': 'right'}) aligns the C(name) column to the left and the C(id) column to the + right." + type: dictionary +""" + +EXAMPLES = r""" +- name: Set a list of users + ansible.builtin.set_fact: + users: + - name: Alice + age: 25 + role: admin + - name: Bob + age: 30 + role: user + +- name: Display a list of users as a table + ansible.builtin.debug: + msg: >- + {{ + users | community.general.to_prettytable + }} + +- name: Display a table with custom column ordering + ansible.builtin.debug: + msg: >- + {{ + users | community.general.to_prettytable( + column_order=['role', 'name', 'age'] + ) + }} + +- name: Display a table with selective column output (only show name and role fields) + ansible.builtin.debug: + msg: >- + {{ + users | community.general.to_prettytable( + column_order=['name', 'role'] + ) + }} + +- name: Display a table with custom headers + ansible.builtin.debug: + msg: >- + {{ + users | community.general.to_prettytable( + header_names=['User Name', 'User Age', 'User Role'] + ) + }} + +- name: Display a table with custom alignments + ansible.builtin.debug: + msg: >- + {{ + users | community.general.to_prettytable( + column_alignments={'name': 'center', 'age': 'right', 'role': 'left'} + ) + }} + +- name: Combine multiple options + ansible.builtin.debug: + msg: >- + {{ + users | community.general.to_prettytable( + 
column_order=['role', 'name', 'age'], + header_names=['Position', 'Full Name', 'Years'], + column_alignments={'name': 'center', 'age': 'right', 'role': 'left'} + ) + }} +""" + +RETURN = r""" +_value: + description: The formatted ASCII table. + type: string +""" + +try: + import prettytable + HAS_PRETTYTABLE = True +except ImportError: + HAS_PRETTYTABLE = False + +from ansible.errors import AnsibleFilterError +from ansible.module_utils.common.text.converters import to_text + + +class TypeValidationError(AnsibleFilterError): + """Custom exception for type validation errors. + + Args: + obj: The object with incorrect type + expected: Description of expected type + """ + def __init__(self, obj, expected): + type_name = "string" if isinstance(obj, str) else type(obj).__name__ + super().__init__(f"Expected {expected}, got a {type_name}") + + +def _validate_list_param(param, param_name, ensure_strings=True): + """Validate a parameter is a list and optionally ensure all elements are strings. + + Args: + param: The parameter to validate + param_name: The name of the parameter for error messages + ensure_strings: Whether to check that all elements are strings + + Raises: + AnsibleFilterError: If validation fails + """ + # Map parameter names to their original error message format + error_messages = { + "column_order": "a list of column names", + "header_names": "a list of header names" + } + + # Use the specific error message if available, otherwise use a generic one + error_msg = error_messages.get(param_name, f"a list for {param_name}") + + if not isinstance(param, list): + raise TypeValidationError(param, error_msg) + + if ensure_strings: + for item in param: + if not isinstance(item, str): + # Maintain original error message format + if param_name == "column_order": + error_msg = "a string for column name" + elif param_name == "header_names": + error_msg = "a string for header name" + else: + error_msg = f"a string for {param_name} element" + raise 
TypeValidationError(item, error_msg) + + +def _match_key(item_dict, lookup_key): + """Find a matching key in a dictionary, handling type conversion. + + Args: + item_dict: Dictionary to search in + lookup_key: Key to look for, possibly needing type conversion + + Returns: + The matching key or None if no match found + """ + # Direct key match + if lookup_key in item_dict: + return lookup_key + + # Try boolean conversion for 'true'/'false' strings + if isinstance(lookup_key, str): + if lookup_key.lower() == 'true' and True in item_dict: + return True + if lookup_key.lower() == 'false' and False in item_dict: + return False + + # Try numeric conversion for string numbers + if lookup_key.isdigit() and int(lookup_key) in item_dict: + return int(lookup_key) + + # No match found + return None + + +def _build_key_maps(data): + """Build mappings between string keys and original keys. + + Args: + data: List of dictionaries with keys to map + + Returns: + Tuple of (key_map, reverse_key_map) + """ + key_map = {} + reverse_key_map = {} + + # Check if the data list is not empty + if not data: + return key_map, reverse_key_map + + first_dict = data[0] + for orig_key in first_dict.keys(): + # Store string version of the key + str_key = to_text(orig_key) + key_map[str_key] = orig_key + # Also store lowercase version for case-insensitive lookups + reverse_key_map[str_key.lower()] = orig_key + + return key_map, reverse_key_map + + +def _configure_alignments(table, field_names, column_alignments): + """Configure column alignments for the table. 
+ + Args: + table: The PrettyTable instance to configure + field_names: List of field names to align + column_alignments: Dict of column alignments + """ + valid_alignments = {"left", "center", "right", "l", "c", "r"} + + if not isinstance(column_alignments, dict): + return + + for col_name, alignment in column_alignments.items(): + if col_name in field_names: + # We already validated alignment is a string and a valid value in the main function + # Just apply it here + alignment = alignment.lower() + table.align[col_name] = alignment[0] + + +def to_prettytable(data, *args, **kwargs): + """Convert a list of dictionaries to an ASCII table. + + Args: + data: List of dictionaries to format + *args: Optional list of column names to specify column order + **kwargs: Optional keyword arguments: + - column_order: List of column names to specify the order + - header_names: List of custom header names + - column_alignments: Dict of column alignments (left, center, right) + + Returns: + String containing the ASCII table + """ + if not HAS_PRETTYTABLE: + raise AnsibleFilterError( + 'You need to install "prettytable" Python module to use this filter' + ) + + # === Input validation === + # Validate list type + if not isinstance(data, list): + raise TypeValidationError(data, "a list of dictionaries") + + # Validate dictionary items if list is not empty + if data and not all(isinstance(item, dict) for item in data): + invalid_item = next((item for item in data if not isinstance(item, dict)), None) + raise TypeValidationError(invalid_item, "all items in the list to be dictionaries") + + # Get sample dictionary to determine fields - empty if no data + sample_dict = data[0] if data else {} + max_fields = len(sample_dict) + + # === Process column order === + # Handle both positional and keyword column_order + column_order = kwargs.pop('column_order', None) + + # Check for conflict between args and column_order + if args and column_order is not None: + raise AnsibleFilterError("Cannot 
use both positional arguments and the 'column_order' keyword argument") + + # Use positional args if provided + if args: + column_order = list(args) + + # Validate column_order + if column_order is not None: + _validate_list_param(column_order, "column_order") + + # Validate column_order doesn't exceed the number of fields (skip if data is empty) + if data and len(column_order) > max_fields: + raise AnsibleFilterError( + f"'column_order' has more elements ({len(column_order)}) than available fields in data ({max_fields})") + + # === Process headers === + # Determine field names and ensure they are strings + if column_order: + field_names = column_order + else: + # Use field names from first dictionary, ensuring all are strings + field_names = [to_text(k) for k in sample_dict] + + # Process custom headers + header_names = kwargs.pop('header_names', None) + if header_names is not None: + _validate_list_param(header_names, "header_names") + + # Validate header_names doesn't exceed the number of fields (skip if data is empty) + if data and len(header_names) > max_fields: + raise AnsibleFilterError( + f"'header_names' has more elements ({len(header_names)}) than available fields in data ({max_fields})") + + # Validate that column_order and header_names have the same size if both provided + if column_order is not None and len(column_order) != len(header_names): + raise AnsibleFilterError( + f"'column_order' and 'header_names' must have the same number of elements. 
" + f"Got {len(column_order)} columns and {len(header_names)} headers.") + + # === Process alignments === + # Get column alignments and validate + column_alignments = kwargs.pop('column_alignments', {}) + valid_alignments = {"left", "center", "right", "l", "c", "r"} + + # Validate column_alignments is a dictionary + if not isinstance(column_alignments, dict): + raise TypeValidationError(column_alignments, "a dictionary for column_alignments") + + # Validate column_alignments keys and values + for key, value in column_alignments.items(): + # Check that keys are strings + if not isinstance(key, str): + raise TypeValidationError(key, "a string for column_alignments key") + + # Check that values are strings + if not isinstance(value, str): + raise TypeValidationError(value, "a string for column_alignments value") + + # Check that values are valid alignments + if value.lower() not in valid_alignments: + raise AnsibleFilterError( + f"Invalid alignment '{value}' in 'column_alignments'. " + f"Valid alignments are: {', '.join(sorted(valid_alignments))}") + + # Validate column_alignments doesn't have more keys than fields (skip if data is empty) + if data and len(column_alignments) > max_fields: + raise AnsibleFilterError( + f"'column_alignments' has more elements ({len(column_alignments)}) than available fields in data ({max_fields})") + + # Check for unknown parameters + if kwargs: + raise AnsibleFilterError(f"Unknown parameter(s) for to_prettytable filter: {', '.join(sorted(kwargs))}") + + # === Build the table === + table = prettytable.PrettyTable() + + # Set the field names for display + display_names = header_names if header_names is not None else field_names + table.field_names = [to_text(name) for name in display_names] + + # Configure alignments after setting field_names + _configure_alignments(table, display_names, column_alignments) + + # Build key maps only if not using explicit column_order and we have data + key_map = {} + reverse_key_map = {} + if not 
column_order and data: # Only needed when using original dictionary keys and we have data + key_map, reverse_key_map = _build_key_maps(data) + + # If we have an empty list with no custom parameters, return a simple empty table + if not data and not column_order and not header_names and not column_alignments: + return "++\n++" + + # Process each row if we have data + for item in data: + row = [] + for col in field_names: + # Try direct mapping first + if col in key_map: + row.append(item.get(key_map[col], "")) + else: + # Try to find a matching key in the item + matched_key = _match_key(item, col) + if matched_key is not None: + row.append(item.get(matched_key, "")) + else: + # Try case-insensitive lookup as last resort + lower_col = col.lower() if isinstance(col, str) else str(col).lower() + if lower_col in reverse_key_map: + row.append(item.get(reverse_key_map[lower_col], "")) + else: + # No match found + row.append("") + table.add_row(row) + + return to_text(table) + + +class FilterModule(object): + """Ansible core jinja2 filters.""" + + def filters(self): + return { + 'to_prettytable': to_prettytable + } diff --git a/plugins/filter/to_seconds.yml b/plugins/filter/to_seconds.yml index d6e6c4e467..49b69d6d69 100644 --- a/plugins/filter/to_seconds.yml +++ b/plugins/filter/to_seconds.yml @@ -5,7 +5,7 @@ DOCUMENTATION: name: to_seconds - short_description: Converte a duration string to seconds + short_description: Converts a duration string to seconds version_added: 0.2.0 description: - Parse a human readable time duration string and convert to seconds. @@ -13,12 +13,12 @@ DOCUMENTATION: _input: description: - The time string to convert. - - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week, - C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec) - and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. 
The suffix C(s) - can be added to a unit as well, so C(seconds) is the same as C(second). + - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week, + V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec) + and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s) + can be added to a unit as well, so V(seconds) is the same as V(second). - Valid strings are space separated combinations of an integer with an optional minus sign and a unit. - - Examples are C(1h), C(-5m), and C(3h -5m 6s). + - Examples are V(1h), V(-5m), and V(3h -5m 6s). type: string required: true year: diff --git a/plugins/filter/to_time_unit.yml b/plugins/filter/to_time_unit.yml index c0149f0acd..256ca573f4 100644 --- a/plugins/filter/to_time_unit.yml +++ b/plugins/filter/to_time_unit.yml @@ -5,7 +5,7 @@ DOCUMENTATION: name: to_time_unit - short_description: Converte a duration string to the given time unit + short_description: Converts a duration string to the given time unit version_added: 0.2.0 description: - Parse a human readable time duration string and convert to the given time unit. @@ -14,12 +14,12 @@ DOCUMENTATION: _input: description: - The time string to convert. - - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week, - C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec) - and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s) - can be added to a unit as well, so C(seconds) is the same as C(second). 
+ - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week, + V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec) + and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s) + can be added to a unit as well, so V(seconds) is the same as V(second). - Valid strings are space separated combinations of an integer with an optional minus sign and a unit. - - Examples are C(1h), C(-5m), and C(3h -5m 6s). + - Examples are V(1h), V(-5m), and V(3h -5m 6s). type: string required: true unit: diff --git a/plugins/filter/to_weeks.yml b/plugins/filter/to_weeks.yml index 499c386276..750e77c378 100644 --- a/plugins/filter/to_weeks.yml +++ b/plugins/filter/to_weeks.yml @@ -5,7 +5,7 @@ DOCUMENTATION: name: to_weeks - short_description: Converte a duration string to weeks + short_description: Converts a duration string to weeks version_added: 0.2.0 description: - Parse a human readable time duration string and convert to weeks. @@ -13,12 +13,12 @@ DOCUMENTATION: _input: description: - The time string to convert. - - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week, - C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec) - and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s) - can be added to a unit as well, so C(seconds) is the same as C(second). + - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week, + V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec) + and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s) + can be added to a unit as well, so V(seconds) is the same as V(second). 
- Valid strings are space separated combinations of an integer with an optional minus sign and a unit. - - Examples are C(1h), C(-5m), and C(3h -5m 6s). + - Examples are V(1h), V(-5m), and V(3h -5m 6s). type: string required: true year: diff --git a/plugins/filter/to_yaml.py b/plugins/filter/to_yaml.py new file mode 100644 index 0000000000..905b04271c --- /dev/null +++ b/plugins/filter/to_yaml.py @@ -0,0 +1,113 @@ +# Copyright (c) Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +import typing as t +from collections.abc import Mapping, Set + +from yaml import dump +try: + from yaml.cyaml import CSafeDumper as SafeDumper +except ImportError: + from yaml import SafeDumper + +from ansible.module_utils.common.collections import is_sequence +try: + # This is ansible-core 2.19+ + from ansible.utils.vars import transform_to_native_types + from ansible.parsing.vault import VaultHelper, VaultLib +except ImportError: + transform_to_native_types = None + +from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode +from ansible.utils.unsafe_proxy import AnsibleUnsafe + + +def _to_native_types_compat(value: t.Any, *, redact_value: str | None) -> t.Any: + """Compatibility function for ansible-core 2.18 and before.""" + if value is None: + return value + if isinstance(value, AnsibleUnsafe): + # This only works up to ansible-core 2.18: + return _to_native_types_compat(value._strip_unsafe(), redact_value=redact_value) + # But that's fine, since this code path isn't taken on ansible-core 2.19+ anyway. 
+ if isinstance(value, Mapping): + return { + _to_native_types_compat(key, redact_value=redact_value): _to_native_types_compat(val, redact_value=redact_value) + for key, val in value.items() + } + if isinstance(value, Set): + return {_to_native_types_compat(elt, redact_value=redact_value) for elt in value} + if is_sequence(value): + return [_to_native_types_compat(elt, redact_value=redact_value) for elt in value] + if isinstance(value, AnsibleVaultEncryptedUnicode): + if redact_value is not None: + return redact_value + # This only works up to ansible-core 2.18: + return value.data + # But that's fine, since this code path isn't taken on ansible-core 2.19+ anyway. + if isinstance(value, bytes): + return bytes(value) + if isinstance(value, str): + return str(value) + + return value + + +def _to_native_types(value: t.Any, *, redact: bool) -> t.Any: + if isinstance(value, Mapping): + return {_to_native_types(k, redact=redact): _to_native_types(v, redact=redact) for k, v in value.items()} + if is_sequence(value): + return [_to_native_types(e, redact=redact) for e in value] + if redact: + ciphertext = VaultHelper.get_ciphertext(value, with_tags=False) + if ciphertext and VaultLib.is_encrypted(ciphertext): + return "" + return transform_to_native_types(value, redact=redact) + + +def remove_all_tags(value: t.Any, *, redact_sensitive_values: bool = False) -> t.Any: + """ + Remove all tags from all values in the input. + + If ``redact_sensitive_values`` is ``True``, all sensitive values will be redacted. 
+ """ + if transform_to_native_types is not None: + return _to_native_types(value, redact=redact_sensitive_values) + + return _to_native_types_compat( + value, + redact_value="" if redact_sensitive_values else None, # same string as in ansible-core 2.19 by transform_to_native_types() + ) + + +def to_yaml(value: t.Any, *, redact_sensitive_values: bool = False, default_flow_style: bool | None = None, **kwargs) -> str: + """Serialize input as terse flow-style YAML.""" + return dump( + remove_all_tags(value, redact_sensitive_values=redact_sensitive_values), + Dumper=SafeDumper, + allow_unicode=True, + default_flow_style=default_flow_style, + **kwargs, + ) + + +def to_nice_yaml(value: t.Any, *, redact_sensitive_values: bool = False, indent: int = 2, default_flow_style: bool = False, **kwargs) -> str: + """Serialize input as verbose multi-line YAML.""" + return to_yaml( + value, + redact_sensitive_values=redact_sensitive_values, + default_flow_style=default_flow_style, + indent=indent, + **kwargs, + ) + + +class FilterModule(object): + def filters(self): + return { + 'to_yaml': to_yaml, + 'to_nice_yaml': to_nice_yaml, + } diff --git a/plugins/filter/to_yaml.yml b/plugins/filter/to_yaml.yml new file mode 100644 index 0000000000..066f8d990d --- /dev/null +++ b/plugins/filter/to_yaml.yml @@ -0,0 +1,92 @@ +# Copyright (c) Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION: + name: to_yaml + author: + - Ansible Core Team + - Felix Fontein (@felixfontein) + version_added: 11.3.0 + short_description: Convert variable to YAML string + description: + - Converts an Ansible variable into a YAML string representation, without preserving vaulted strings as P(ansible.builtin.to_yaml#filter). + - This filter functions as a wrapper to the L(Python PyYAML library, https://pypi.org/project/PyYAML/)'s C(yaml.dump) function. 
+ positional: _input + options: + _input: + description: + - A variable or expression that returns a data structure. + type: raw + required: true + indent: + description: + - Number of spaces to indent Python structures, mainly used for display to humans. + type: integer + sort_keys: + description: + - Affects sorting of dictionary keys. + default: true + type: bool + default_style: + description: + - Indicates the style of the scalar. + choices: + - '' + - "'" + - '"' + - '|' + - '>' + type: string + canonical: + description: + - If set to V(true), export tag type to the output. + type: bool + width: + description: + - Set the preferred line width. + type: integer + line_break: + description: + - Specify the line break. + type: string + encoding: + description: + - Specify the output encoding. + type: string + explicit_start: + description: + - If set to V(true), adds an explicit start using C(---). + type: bool + explicit_end: + description: + - If set to V(true), adds an explicit end using C(...). + type: bool + redact_sensitive_values: + description: + - If set to V(true), vaulted strings are replaced by V() instead of being decrypted. + - With future ansible-core versions, this can extend to other strings tagged as sensitive. + - B(Note) that with ansible-core 2.18 and before this might not yield the expected result + since these versions of ansible-core strip the vault information away from strings that are + part of more complex data structures specified in C(vars). + type: bool + default: false + notes: + - More options may be available, see L(PyYAML documentation, https://pyyaml.org/wiki/PyYAMLDocumentation) for details. + - >- + These parameters to C(yaml.dump) are not accepted, as they are overridden internally: O(ignore:allow_unicode). 
+ +EXAMPLES: | + --- + # Dump variable in a template to create a YAML document + value: "{{ github_workflow | community.general.to_yaml }}" + + --- + # Same as above but 'prettier' (equivalent to community.general.to_nice_yaml filter) + value: "{{ docker_config | community.general.to_yaml(indent=2) }}" + +RETURN: + _value: + description: + - The YAML serialized string representing the variable structure inputted. + type: string diff --git a/plugins/filter/to_years.yml b/plugins/filter/to_years.yml index 1a244a276f..62f282a8b6 100644 --- a/plugins/filter/to_years.yml +++ b/plugins/filter/to_years.yml @@ -5,7 +5,7 @@ DOCUMENTATION: name: to_years - short_description: Converte a duration string to years + short_description: Converts a duration string to years version_added: 0.2.0 description: - Parse a human readable time duration string and convert to years. @@ -13,12 +13,12 @@ DOCUMENTATION: _input: description: - The time string to convert. - - Can use the units C(y) and C(year) for a year, C(mo) and C(month) for a month, C(w) and C(week) for a week, - C(d) and C(day) for a day, C(h) and C(hour) for a hour, C(m), C(min) and C(minute) for minutes, C(s), C(sec) - and C(second) for seconds, C(ms), C(msec), C(msecond) and C(millisecond) for milliseconds. The suffix C(s) - can be added to a unit as well, so C(seconds) is the same as C(second). + - Can use the units V(y) and V(year) for a year, V(mo) and V(month) for a month, V(w) and V(week) for a week, + V(d) and V(day) for a day, V(h) and V(hour) for a hour, V(m), V(min) and V(minute) for minutes, V(s), V(sec) + and V(second) for seconds, V(ms), V(msec), V(msecond) and V(millisecond) for milliseconds. The suffix V(s) + can be added to a unit as well, so V(seconds) is the same as V(second). - Valid strings are space separated combinations of an integer with an optional minus sign and a unit. - - Examples are C(1h), C(-5m), and C(3h -5m 6s). + - Examples are V(1h), V(-5m), and V(3h -5m 6s). 
type: string required: true year: diff --git a/plugins/filter/unicode_normalize.py b/plugins/filter/unicode_normalize.py index dfbf20c573..f1fe18402b 100644 --- a/plugins/filter/unicode_normalize.py +++ b/plugins/filter/unicode_normalize.py @@ -1,56 +1,58 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2021, Andrew Pantuso (@ajpantuso) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: unicode_normalize - short_description: Normalizes unicode strings to facilitate comparison of characters with normalized forms - version_added: 3.7.0 - author: Andrew Pantuso (@Ajpantuso) - description: - - Normalizes unicode strings to facilitate comparison of characters with normalized forms. - positional: form - options: - _input: - description: A unicode string. - type: string - required: true - form: - description: - - The normal form to use. - - See U(https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize) for details. - type: string - default: NFC - choices: - - NFC - - NFD - - NFKC - - NFKD -''' +DOCUMENTATION = r""" +name: unicode_normalize +short_description: Normalizes unicode strings to facilitate comparison of characters with normalized forms +version_added: 3.7.0 +author: Andrew Pantuso (@Ajpantuso) +description: + - Normalizes unicode strings to facilitate comparison of characters with normalized forms. +positional: form +options: + _input: + description: A unicode string. + type: string + required: true + form: + description: + - The normal form to use. + - See U(https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize) for details. 
+ type: string + default: NFC + choices: + - NFC + - NFD + - NFKC + - NFKD +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Normalize unicode string ansible.builtin.set_fact: dictionary: "{{ 'ä' | community.general.unicode_normalize('NFKD') }}" # The resulting string has length 2: one letter is 'a', the other # the diacritic combiner. -''' +""" -RETURN = ''' - _value: - description: The normalized unicode string of the specified normal form. - type: string -''' +RETURN = r""" +_value: + description: The normalized unicode string of the specified normal form. + type: string +""" from unicodedata import normalize -from ansible.errors import AnsibleFilterError, AnsibleFilterTypeError -from ansible.module_utils.six import text_type +from ansible.errors import AnsibleFilterError + +try: + from ansible.errors import AnsibleTypeError +except ImportError: + from ansible.errors import AnsibleFilterTypeError as AnsibleTypeError def unicode_normalize(data, form='NFC'): @@ -65,11 +67,11 @@ def unicode_normalize(data, form='NFC'): A normalized unicode string of the specified 'form'. 
""" - if not isinstance(data, text_type): - raise AnsibleFilterTypeError("%s is not a valid input type" % type(data)) + if not isinstance(data, str): + raise AnsibleTypeError(f"{type(data)} is not a valid input type") if form not in ('NFC', 'NFD', 'NFKC', 'NFKD'): - raise AnsibleFilterError("%s is not a valid form" % form) + raise AnsibleFilterError(f"{form!r} is not a valid form") return normalize(form, data) diff --git a/plugins/filter/version_sort.py b/plugins/filter/version_sort.py index 09eedbf563..893c7e5bd3 100644 --- a/plugins/filter/version_sort.py +++ b/plugins/filter/version_sort.py @@ -1,39 +1,37 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2021 Eric Lavarde # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: version_sort - short_description: Sort a list according to version order instead of pure alphabetical one - version_added: 2.2.0 - author: Eric L. (@ericzolf) - description: - - Sort a list according to version order instead of pure alphabetical one. - options: - _input: - description: A list of strings to sort. - type: list - elements: string - required: true -''' +DOCUMENTATION = r""" +name: version_sort +short_description: Sort a list according to version order instead of pure alphabetical one +version_added: 2.2.0 +author: Eric L. (@ericzolf) +description: + - Sort a list according to version order instead of pure alphabetical one. +options: + _input: + description: A list of strings to sort. 
+ type: list + elements: string + required: true +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Convert list of tuples into dictionary ansible.builtin.set_fact: dictionary: "{{ ['2.1', '2.10', '2.9'] | community.general.version_sort }}" # Result is ['2.1', '2.9', '2.10'] -''' +""" -RETURN = ''' - _value: - description: The list of strings sorted by version. - type: list - elements: string -''' +RETURN = r""" +_value: + description: The list of strings sorted by version. + type: list + elements: string +""" from ansible_collections.community.general.plugins.module_utils.version import LooseVersion diff --git a/plugins/inventory/cobbler.py b/plugins/inventory/cobbler.py index 936a409aeb..7374193a74 100644 --- a/plugins/inventory/cobbler.py +++ b/plugins/inventory/cobbler.py @@ -1,103 +1,160 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2020 Orion Poplawski # Copyright (c) 2020 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Orion Poplawski (@opoplawski) - name: cobbler - short_description: Cobbler inventory source - version_added: 1.0.0 +DOCUMENTATION = r""" +author: Orion Poplawski (@opoplawski) +name: cobbler +short_description: Cobbler inventory source +version_added: 1.0.0 +description: + - Get inventory hosts from the cobbler service. + - 'Uses a configuration file as an inventory source, it must end in C(.cobbler.yml) or C(.cobbler.yaml) and have a C(plugin: + cobbler) entry.' + - Adds the primary IP addresses to C(cobbler_ipv4_address) and C(cobbler_ipv6_address) host variables if defined in Cobbler. + The primary IP address is defined as the management interface if defined, or the interface who's DNS name matches the + hostname of the system, or else the first interface found. 
+extends_documentation_fragment: + - inventory_cache +options: + plugin: + description: The name of this plugin, it should always be set to V(community.general.cobbler) for this plugin to recognize + it as its own. + type: string + required: true + choices: ['cobbler', 'community.general.cobbler'] + url: + description: URL to cobbler. + type: string + default: 'http://cobbler/cobbler_api' + env: + - name: COBBLER_SERVER + user: + description: Cobbler authentication user. + type: string + required: false + env: + - name: COBBLER_USER + password: + description: Cobbler authentication password. + type: string + required: false + env: + - name: COBBLER_PASSWORD + cache_fallback: + description: Fallback to cached results if connection to cobbler fails. + type: boolean + default: false + connection_timeout: + description: Timeout to connect to cobbler server. + type: int + required: false + version_added: 10.7.0 + exclude_mgmt_classes: + description: Management classes to exclude from inventory. + type: list + default: [] + elements: str + version_added: 7.4.0 + exclude_profiles: description: - - Get inventory hosts from the cobbler service. - - "Uses a configuration file as an inventory source, it must end in C(.cobbler.yml) or C(.cobbler.yaml) and has a C(plugin: cobbler) entry." - extends_documentation_fragment: - - inventory_cache - options: - plugin: - description: The name of this plugin, it should always be set to C(community.general.cobbler) for this plugin to recognize it as it's own. - required: true - choices: [ 'cobbler', 'community.general.cobbler' ] - url: - description: URL to cobbler. - default: 'http://cobbler/cobbler_api' - env: - - name: COBBLER_SERVER - user: - description: Cobbler authentication user. 
- required: false - env: - - name: COBBLER_USER - password: - description: Cobbler authentication password - required: false - env: - - name: COBBLER_PASSWORD - cache_fallback: - description: Fallback to cached results if connection to cobbler fails - type: boolean - default: false - exclude_profiles: - description: - - Profiles to exclude from inventory. - - Ignored if I(include_profiles) is specified. - type: list - default: [] - elements: str - include_profiles: - description: - - Profiles to include from inventory. - - If specified, all other profiles will be excluded. - - I(exclude_profiles) is ignored if I(include_profiles) is specified. - type: list - default: [] - elements: str - version_added: 4.4.0 - group_by: - description: Keys to group hosts by - type: list - elements: string - default: [ 'mgmt_classes', 'owners', 'status' ] - group: - description: Group to place all hosts into - default: cobbler - group_prefix: - description: Prefix to apply to cobbler groups - default: cobbler_ - want_facts: - description: Toggle, if C(true) the plugin will retrieve host facts from the server - type: boolean - default: true -''' + - Profiles to exclude from inventory. + - Ignored if O(include_profiles) is specified. + type: list + default: [] + elements: str + include_mgmt_classes: + description: Management classes to include from inventory. + type: list + default: [] + elements: str + version_added: 7.4.0 + include_profiles: + description: + - Profiles to include from inventory. + - If specified, all other profiles are excluded. + - O(exclude_profiles) is ignored if O(include_profiles) is specified. + type: list + default: [] + elements: str + version_added: 4.4.0 + inventory_hostname: + description: + - What to use for the ansible inventory hostname. + - By default the networking hostname is used if defined, otherwise the DNS name of the management or first non-static + interface. + - If set to V(system), the cobbler system name is used. 
+ type: str + choices: ['hostname', 'system'] + default: hostname + version_added: 7.1.0 + group_by: + description: Keys to group hosts by. + type: list + elements: string + default: ['mgmt_classes', 'owners', 'status'] + group: + description: Group to place all hosts into. + default: cobbler + group_prefix: + description: Prefix to apply to cobbler groups. + default: cobbler_ + want_facts: + description: Toggle, if V(true) the plugin retrieves all host facts from the server. + type: boolean + default: true + want_ip_addresses: + description: + - Toggle, if V(true) the plugin adds a C(cobbler_ipv4_addresses) and C(cobbler_ipv6_addresses) dictionary to the + defined O(group) mapping interface DNS names to IP addresses. + type: boolean + default: true + version_added: 7.1.0 + facts_level: + description: + - Set to V(normal) to gather only system-level variables. + - Set to V(as_rendered) to gather all variables as rolled up by Cobbler. + type: string + choices: ['normal', 'as_rendered'] + default: normal + version_added: 10.7.0 +""" -EXAMPLES = ''' +EXAMPLES = r""" # my.cobbler.yml plugin: community.general.cobbler url: http://cobbler/cobbler_api user: ansible-tester password: secure -''' +""" import socket from ansible.errors import AnsibleError -from ansible.module_utils.common.text.converters import to_text -from ansible.module_utils.six import iteritems from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, to_safe_group_name +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe + # xmlrpc try: - import xmlrpclib as xmlrpc_client + import xmlrpc.client as xmlrpc_client HAS_XMLRPC_CLIENT = True except ImportError: - try: - import xmlrpc.client as xmlrpc_client - HAS_XMLRPC_CLIENT = True - except ImportError: - HAS_XMLRPC_CLIENT = False + HAS_XMLRPC_CLIENT = False + + +class TimeoutTransport (xmlrpc_client.SafeTransport): + def __init__(self, timeout=socket._GLOBAL_DEFAULT_TIMEOUT): + super(TimeoutTransport, 
self).__init__() + self._timeout = timeout + self.context = None + + def make_connection(self, host): + conn = xmlrpc_client.SafeTransport.make_connection(self, host) + conn.timeout = self._timeout + return conn class InventoryModule(BaseInventoryPlugin, Cacheable): @@ -108,7 +165,9 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): def __init__(self): super(InventoryModule, self).__init__() self.cache_key = None - self.connection = None + + if not HAS_XMLRPC_CLIENT: + raise AnsibleError('Could not import xmlrpc client library') def verify_file(self, path): valid = False @@ -119,18 +178,6 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): self.display.vvv('Skipping due to inventory source not ending in "cobbler.yaml" nor "cobbler.yml"') return valid - def _get_connection(self): - if not HAS_XMLRPC_CLIENT: - raise AnsibleError('Could not import xmlrpc client library') - - if self.connection is None: - self.display.vvvv('Connecting to %s\n' % self.cobbler_url) - self.connection = xmlrpc_client.Server(self.cobbler_url, allow_none=True) - self.token = None - if self.get_option('user') is not None: - self.token = self.connection.login(self.get_option('user'), self.get_option('password')) - return self.connection - def _init_cache(self): if self.cache_key not in self._cache: self._cache[self.cache_key] = {} @@ -144,12 +191,11 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): def _get_profiles(self): if not self.use_cache or 'profiles' not in self._cache.get(self.cache_key, {}): - c = self._get_connection() try: if self.token is not None: - data = c.get_profiles(self.token) + data = self.cobbler.get_profiles(self.token) else: - data = c.get_profiles() + data = self.cobbler.get_profiles() except (socket.gaierror, socket.error, xmlrpc_client.ProtocolError): self._reload_cache() else: @@ -160,12 +206,20 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): def _get_systems(self): if not self.use_cache or 'systems' not in 
self._cache.get(self.cache_key, {}): - c = self._get_connection() try: if self.token is not None: - data = c.get_systems(self.token) + data = self.cobbler.get_systems(self.token) else: - data = c.get_systems() + data = self.cobbler.get_systems() + + # If more facts are requested, gather them all from Cobbler + if self.facts_level == "as_rendered": + for i, host in enumerate(data): + self.display.vvvv(f"Gathering all facts for {host['name']}\n") + if self.token is not None: + data[i] = self.cobbler.get_system_as_rendered(host['name'], self.token) + else: + data[i] = self.cobbler.get_system_as_rendered(host['name']) except (socket.gaierror, socket.error, xmlrpc_client.ProtocolError): self._reload_cache() else: @@ -175,7 +229,7 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): return self._cache[self.cache_key]['systems'] def _add_safe_group_name(self, group, child=None): - group_name = self.inventory.add_group(to_safe_group_name('%s%s' % (self.get_option('group_prefix'), group.lower().replace(" ", "")))) + group_name = self.inventory.add_group(to_safe_group_name(f"{self.get_option('group_prefix')}{group.lower().replace(' ', '')}")) if child is not None: self.inventory.add_child(group_name, child) return group_name @@ -195,25 +249,40 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): # get connection host self.cobbler_url = self.get_option('url') + self.display.vvvv(f'Connecting to {self.cobbler_url}\n') + + if 'connection_timeout' in self._options: + self.cobbler = xmlrpc_client.Server(self.cobbler_url, allow_none=True, + transport=TimeoutTransport(timeout=self.get_option('connection_timeout'))) + else: + self.cobbler = xmlrpc_client.Server(self.cobbler_url, allow_none=True) + self.token = None + if self.get_option('user') is not None: + self.token = self.cobbler.login(str(self.get_option('user')), str(self.get_option('password'))) + self.cache_key = self.get_cache_key(path) self.use_cache = cache and self.get_option('cache') + 
self.exclude_mgmt_classes = self.get_option('exclude_mgmt_classes') + self.include_mgmt_classes = self.get_option('include_mgmt_classes') self.exclude_profiles = self.get_option('exclude_profiles') self.include_profiles = self.get_option('include_profiles') self.group_by = self.get_option('group_by') + self.inventory_hostname = self.get_option('inventory_hostname') + self.facts_level = self.get_option('facts_level') for profile in self._get_profiles(): if profile['parent']: - self.display.vvvv('Processing profile %s with parent %s\n' % (profile['name'], profile['parent'])) + self.display.vvvv(f"Processing profile {profile['name']} with parent {profile['parent']}\n") if not self._exclude_profile(profile['parent']): parent_group_name = self._add_safe_group_name(profile['parent']) - self.display.vvvv('Added profile parent group %s\n' % parent_group_name) + self.display.vvvv(f'Added profile parent group {parent_group_name}\n') if not self._exclude_profile(profile['name']): group_name = self._add_safe_group_name(profile['name']) - self.display.vvvv('Added profile group %s\n' % group_name) + self.display.vvvv(f'Added profile group {group_name}\n') self.inventory.add_child(parent_group_name, group_name) else: - self.display.vvvv('Processing profile %s without parent\n' % profile['name']) + self.display.vvvv(f"Processing profile {profile['name']} without parent\n") # Create a hierarchy of profile names profile_elements = profile['name'].split('-') i = 0 @@ -221,12 +290,12 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): profile_group = '-'.join(profile_elements[0:i + 1]) profile_group_child = '-'.join(profile_elements[0:i + 2]) if self._exclude_profile(profile_group): - self.display.vvvv('Excluding profile %s\n' % profile_group) + self.display.vvvv(f'Excluding profile {profile_group}\n') break group_name = self._add_safe_group_name(profile_group) - self.display.vvvv('Added profile group %s\n' % group_name) + self.display.vvvv(f'Added profile group {group_name}\n') 
child_group_name = self._add_safe_group_name(profile_group_child) - self.display.vvvv('Added profile child group %s to %s\n' % (child_group_name, group_name)) + self.display.vvvv(f'Added profile child group {child_group_name} to {group_name}\n') self.inventory.add_child(group_name, child_group_name) i = i + 1 @@ -234,54 +303,112 @@ class InventoryModule(BaseInventoryPlugin, Cacheable): self.group = to_safe_group_name(self.get_option('group')) if self.group is not None and self.group != '': self.inventory.add_group(self.group) - self.display.vvvv('Added site group %s\n' % self.group) + self.display.vvvv(f'Added site group {self.group}\n') + ip_addresses = {} + ipv6_addresses = {} for host in self._get_systems(): # Get the FQDN for the host and add it to the right groups - hostname = host['hostname'] # None + if self.inventory_hostname == 'system': + hostname = make_unsafe(host['name']) # None + else: + hostname = make_unsafe(host['hostname']) # None interfaces = host['interfaces'] - if self._exclude_profile(host['profile']): - self.display.vvvv('Excluding host %s in profile %s\n' % (host['name'], host['profile'])) - continue + if set(host['mgmt_classes']) & set(self.include_mgmt_classes): + self.display.vvvv(f"Including host {host['name']} in mgmt_classes {host['mgmt_classes']}\n") + else: + if self._exclude_profile(host['profile']): + self.display.vvvv(f"Excluding host {host['name']} in profile {host['profile']}\n") + continue + + if set(host['mgmt_classes']) & set(self.exclude_mgmt_classes): + self.display.vvvv(f"Excluding host {host['name']} in mgmt_classes {host['mgmt_classes']}\n") + continue # hostname is often empty for non-static IP hosts if hostname == '': - for (iname, ivalue) in iteritems(interfaces): + for iname, ivalue in interfaces.items(): if ivalue['management'] or not ivalue['static']: this_dns_name = ivalue.get('dns_name', None) if this_dns_name is not None and this_dns_name != "": - hostname = this_dns_name - self.display.vvvv('Set hostname to %s 
from %s\n' % (hostname, iname)) + hostname = make_unsafe(this_dns_name) + self.display.vvvv(f'Set hostname to {hostname} from {iname}\n') if hostname == '': - self.display.vvvv('Cannot determine hostname for host %s, skipping\n' % host['name']) + self.display.vvvv(f"Cannot determine hostname for host {host['name']}, skipping\n") continue self.inventory.add_host(hostname) - self.display.vvvv('Added host %s hostname %s\n' % (host['name'], hostname)) + self.display.vvvv(f"Added host {host['name']} hostname {hostname}\n") # Add host to profile group - group_name = self._add_safe_group_name(host['profile'], child=hostname) - self.display.vvvv('Added host %s to profile group %s\n' % (hostname, group_name)) + if host['profile'] != '': + group_name = self._add_safe_group_name(host['profile'], child=hostname) + self.display.vvvv(f'Added host {hostname} to profile group {group_name}\n') + else: + self.display.warning(f'Host {hostname} has an empty profile\n') # Add host to groups specified by group_by fields for group_by in self.group_by: - if host[group_by] == '<>': + if host[group_by] == '<>' or host[group_by] == '': groups = [] else: groups = [host[group_by]] if isinstance(host[group_by], str) else host[group_by] for group in groups: group_name = self._add_safe_group_name(group, child=hostname) - self.display.vvvv('Added host %s to group_by %s group %s\n' % (hostname, group_by, group_name)) + self.display.vvvv(f'Added host {hostname} to group_by {group_by} group {group_name}\n') # Add to group for this inventory if self.group is not None: self.inventory.add_child(self.group, hostname) # Add host variables + ip_address = None + ip_address_first = None + ipv6_address = None + ipv6_address_first = None + for iname, ivalue in interfaces.items(): + # Set to first interface or management interface if defined or hostname matches dns_name + if ivalue['ip_address'] != "": + if ip_address_first is None: + ip_address_first = ivalue['ip_address'] + if ivalue['management']: + 
ip_address = ivalue['ip_address'] + elif ivalue['dns_name'] == hostname and ip_address is None: + ip_address = ivalue['ip_address'] + if ivalue['ipv6_address'] != "": + if ipv6_address_first is None: + ipv6_address_first = ivalue['ipv6_address'] + if ivalue['management']: + ipv6_address = ivalue['ipv6_address'] + elif ivalue['dns_name'] == hostname and ipv6_address is None: + ipv6_address = ivalue['ipv6_address'] + + # Collect all interface name mappings for adding to group vars + if self.get_option('want_ip_addresses'): + if ivalue['dns_name'] != "": + if ivalue['ip_address'] != "": + ip_addresses[ivalue['dns_name']] = ivalue['ip_address'] + if ivalue['ipv6_address'] != "": + ip_addresses[ivalue['dns_name']] = ivalue['ipv6_address'] + + # Add ip_address to host if defined, use first if no management or matched dns_name + if ip_address is None and ip_address_first is not None: + ip_address = ip_address_first + if ip_address is not None: + self.inventory.set_variable(hostname, 'cobbler_ipv4_address', make_unsafe(ip_address)) + if ipv6_address is None and ipv6_address_first is not None: + ipv6_address = ipv6_address_first + if ipv6_address is not None: + self.inventory.set_variable(hostname, 'cobbler_ipv6_address', make_unsafe(ipv6_address)) + if self.get_option('want_facts'): try: - self.inventory.set_variable(hostname, 'cobbler', host) + self.inventory.set_variable(hostname, 'cobbler', make_unsafe(host)) except ValueError as e: - self.display.warning("Could not set host info for %s: %s" % (hostname, to_text(e))) + self.display.warning(f"Could not set host info for {hostname}: {e}") + + if self.get_option('want_ip_addresses'): + self.inventory.set_variable(self.group, 'cobbler_ipv4_addresses', make_unsafe(ip_addresses)) + self.inventory.set_variable(self.group, 'cobbler_ipv6_addresses', make_unsafe(ipv6_addresses)) diff --git a/plugins/inventory/gitlab_runners.py b/plugins/inventory/gitlab_runners.py index d68b8d4e28..4a2b32680e 100644 --- 
a/plugins/inventory/gitlab_runners.py +++ b/plugins/inventory/gitlab_runners.py @@ -1,68 +1,67 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Stefan Heitmueller # Copyright (c) 2018 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' - name: gitlab_runners - author: - - Stefan Heitmüller (@morph027) - short_description: Ansible dynamic inventory plugin for GitLab runners. - requirements: - - python >= 2.7 - - python-gitlab > 1.8.0 - extends_documentation_fragment: - - constructed - description: - - Reads inventories from the GitLab API. - - Uses a YAML configuration file gitlab_runners.[yml|yaml]. - options: - plugin: - description: The name of this plugin, it should always be set to 'gitlab_runners' for this plugin to recognize it as it's own. - type: str - required: true - choices: - - gitlab_runners - - community.general.gitlab_runners - server_url: - description: The URL of the GitLab server, with protocol (i.e. http or https). - env: - - name: GITLAB_SERVER_URL - version_added: 1.0.0 - type: str - required: true - api_token: - description: GitLab token for logging in. 
- env: - - name: GITLAB_API_TOKEN - version_added: 1.0.0 - type: str - aliases: - - private_token - - access_token - filter: - description: filter runners from GitLab API - env: - - name: GITLAB_FILTER - version_added: 1.0.0 - type: str - choices: ['active', 'paused', 'online', 'specific', 'shared'] - verbose_output: - description: Toggle to (not) include all available nodes metadata - type: bool - default: true -''' +DOCUMENTATION = r""" +name: gitlab_runners +author: + - Stefan Heitmüller (@morph027) +short_description: Ansible dynamic inventory plugin for GitLab runners +requirements: + - python-gitlab > 1.8.0 +extends_documentation_fragment: + - constructed +description: + - Reads inventories from the GitLab API. + - Uses a YAML configuration file gitlab_runners.[yml|yaml]. +options: + plugin: + description: The name of this plugin, it should always be set to V(gitlab_runners) for this plugin to recognize it as its own. + type: str + required: true + choices: + - gitlab_runners + - community.general.gitlab_runners + server_url: + description: The URL of the GitLab server, with protocol (i.e. http or https). + env: + - name: GITLAB_SERVER_URL + version_added: 1.0.0 + type: str + required: true + api_token: + description: GitLab token for logging in. + env: + - name: GITLAB_API_TOKEN + version_added: 1.0.0 + type: str + aliases: + - private_token + - access_token + filter: + description: Filter runners from GitLab API. + env: + - name: GITLAB_FILTER + version_added: 1.0.0 + type: str + choices: ['active', 'paused', 'online', 'specific', 'shared'] + verbose_output: + description: Toggle to (not) include all available nodes metadata. 
+ type: bool + default: true +""" -EXAMPLES = ''' +EXAMPLES = r""" +--- # gitlab_runners.yml plugin: community.general.gitlab_runners host: https://gitlab.com +--- # Example using constructed features to create groups and set ansible_host plugin: community.general.gitlab_runners host: https://gitlab.com @@ -79,12 +78,13 @@ keyed_groups: # hint: labels containing special characters will be converted to safe names - key: 'tag_list' prefix: tag -''' +""" from ansible.errors import AnsibleError, AnsibleParserError -from ansible.module_utils.common.text.converters import to_native from ansible.plugins.inventory import BaseInventoryPlugin, Constructable +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe + try: import gitlab HAS_GITLAB = True @@ -106,11 +106,11 @@ class InventoryModule(BaseInventoryPlugin, Constructable): else: runners = gl.runners.all() for runner in runners: - host = str(runner['id']) + host = make_unsafe(str(runner['id'])) ip_address = runner['ip_address'] - host_attrs = vars(gl.runners.get(runner['id']))['_attrs'] + host_attrs = make_unsafe(vars(gl.runners.get(runner['id']))['_attrs']) self.inventory.add_host(host, group='gitlab_runners') - self.inventory.set_variable(host, 'ansible_host', ip_address) + self.inventory.set_variable(host, 'ansible_host', make_unsafe(ip_address)) if self.get_option('verbose_output', True): self.inventory.set_variable(host, 'gitlab_runner_attributes', host_attrs) @@ -123,7 +123,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): # Create groups based on variable values and add the corresponding hosts to it self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host_attrs, host, strict=strict) except Exception as e: - raise AnsibleParserError('Unable to fetch hosts from GitLab API, this was the original exception: %s' % to_native(e)) + raise AnsibleParserError(f'Unable to fetch hosts from GitLab API, this was the original exception: {e}') def verify_file(self, 
path): """Return the possibly of a file being consumable by this plugin.""" diff --git a/plugins/inventory/icinga2.py b/plugins/inventory/icinga2.py index 70e0f57332..017959f403 100644 --- a/plugins/inventory/icinga2.py +++ b/plugins/inventory/icinga2.py @@ -1,78 +1,81 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2021, Cliff Hults # Copyright (c) 2021 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' - name: icinga2 - short_description: Icinga2 inventory source - version_added: 3.7.0 - author: - - Cliff Hults (@BongoEADGC6) +DOCUMENTATION = r""" +name: icinga2 +short_description: Icinga2 inventory source +version_added: 3.7.0 +author: + - Cliff Hults (@BongoEADGC6) +description: + - Get inventory hosts from the Icinga2 API. + - Uses a configuration file as an inventory source, it must end in C(.icinga2.yml) or C(.icinga2.yaml). +extends_documentation_fragment: + - constructed +options: + strict: + version_added: 4.4.0 + compose: + version_added: 4.4.0 + groups: + version_added: 4.4.0 + keyed_groups: + version_added: 4.4.0 + plugin: + description: Name of the plugin. + required: true + type: string + choices: ['community.general.icinga2'] + url: + description: Root URL of Icinga2 API. + type: string + required: true + user: + description: Username to query the API. + type: string + required: true + password: + description: Password to query the API. + type: string + required: true + host_filter: description: - - Get inventory hosts from the Icinga2 API. - - "Uses a configuration file as an inventory source, it must end in - C(.icinga2.yml) or C(.icinga2.yaml)." 
- extends_documentation_fragment: - - constructed - options: - strict: - version_added: 4.4.0 - compose: - version_added: 4.4.0 - groups: - version_added: 4.4.0 - keyed_groups: - version_added: 4.4.0 - plugin: - description: Name of the plugin. - required: true - type: string - choices: ['community.general.icinga2'] - url: - description: Root URL of Icinga2 API. - type: string - required: true - user: - description: Username to query the API. - type: string - required: true - password: - description: Password to query the API. - type: string - required: true - host_filter: - description: - - An Icinga2 API valid host filter. Leave blank for no filtering - type: string - required: false - validate_certs: - description: Enables or disables SSL certificate verification. - type: boolean - default: true - inventory_attr: - description: - - Allows the override of the inventory name based on different attributes. - - This allows for changing the way limits are used. - - The current default, C(address), is sometimes not unique or present. We recommend to use C(name) instead. - type: string - default: address - choices: ['name', 'display_name', 'address'] - version_added: 4.2.0 -''' + - An Icinga2 API valid host filter. Leave blank for no filtering. + type: string + required: false + validate_certs: + description: Enables or disables SSL certificate verification. + type: boolean + default: true + inventory_attr: + description: + - Allows the override of the inventory name based on different attributes. + - This allows for changing the way limits are used. + - The current default, V(address), is sometimes not unique or present. We recommend to use V(name) instead. + type: string + default: address + choices: ['name', 'display_name', 'address'] + version_added: 4.2.0 + group_by_hostgroups: + description: + - Uses Icinga2 hostgroups as groups. 
+ type: boolean + default: true + version_added: 8.4.0 +""" -EXAMPLES = r''' +EXAMPLES = r""" # my.icinga2.yml plugin: community.general.icinga2 url: http://localhost:5665 user: ansible password: secure host_filter: \"linux-servers\" in host.groups -validate_certs: false +validate_certs: false # only do this when connecting to localhost! inventory_attr: name groups: # simple name matching @@ -88,14 +91,16 @@ compose: # set 'ansible_user' and 'ansible_port' from icinga2 host vars ansible_user: icinga2_attributes.vars.ansible_user ansible_port: icinga2_attributes.vars.ansible_port | default(22) -''' +""" import json +from urllib.error import HTTPError from ansible.errors import AnsibleParserError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable from ansible.module_utils.urls import open_url -from ansible.module_utils.six.moves.urllib.error import HTTPError + +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe class InventoryModule(BaseInventoryPlugin, Constructable): @@ -114,6 +119,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): self.ssl_verify = None self.host_filter = None self.inventory_attr = None + self.group_by_hostgroups = None self.cache_key = None self.use_cache = None @@ -132,7 +138,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): 'User-Agent': "ansible-icinga2-inv", 'Accept': "application/json", } - api_status_url = self.icinga2_url + "/status" + api_status_url = f"{self.icinga2_url}/status" request_args = { 'headers': self.headers, 'url_username': self.icinga2_user, @@ -142,7 +148,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): open_url(api_status_url, **request_args) def _post_request(self, request_url, data=None): - self.display.vvv("Requested URL: %s" % request_url) + self.display.vvv(f"Requested URL: {request_url}") request_args = { 'headers': self.headers, 'url_username': self.icinga2_user, @@ -151,42 +157,38 @@ class 
InventoryModule(BaseInventoryPlugin, Constructable): } if data is not None: request_args['data'] = json.dumps(data) - self.display.vvv("Request Args: %s" % request_args) + self.display.vvv(f"Request Args: {request_args}") try: response = open_url(request_url, **request_args) except HTTPError as e: try: error_body = json.loads(e.read().decode()) - self.display.vvv("Error returned: {0}".format(error_body)) + self.display.vvv(f"Error returned: {error_body}") except Exception: error_body = {"status": None} if e.code == 404 and error_body.get('status') == "No objects found.": raise AnsibleParserError("Host filter returned no data. Please confirm your host_filter value is valid") - raise AnsibleParserError("Unexpected data returned: {0} -- {1}".format(e, error_body)) + raise AnsibleParserError(f"Unexpected data returned: {e} -- {error_body}") response_body = response.read() json_data = json.loads(response_body.decode('utf-8')) - self.display.vvv("Returned Data: %s" % json.dumps(json_data, indent=4, sort_keys=True)) + self.display.vvv(f"Returned Data: {json.dumps(json_data, indent=4, sort_keys=True)}") if 200 <= response.status <= 299: return json_data if response.status == 404 and json_data['status'] == "No objects found.": raise AnsibleParserError( - "API returned no data -- Response: %s - %s" - % (response.status, json_data['status'])) + f"API returned no data -- Response: {response.status} - {json_data['status']}") if response.status == 401: raise AnsibleParserError( - "API was unable to complete query -- Response: %s - %s" - % (response.status, json_data['status'])) + f"API was unable to complete query -- Response: {response.status} - {json_data['status']}") if response.status == 500: raise AnsibleParserError( - "API Response - %s - %s" - % (json_data['status'], json_data['errors'])) + f"API Response - {json_data['status']} - {json_data['errors']}") raise AnsibleParserError( - "Unexpected data returned - %s - %s" - % (json_data['status'], json_data['errors'])) + 
f"Unexpected data returned - {json_data['status']} - {json_data['errors']}") def _query_hosts(self, hosts=None, attrs=None, joins=None, host_filter=None): - query_hosts_url = "{0}/objects/hosts".format(self.icinga2_url) + query_hosts_url = f"{self.icinga2_url}/objects/hosts" self.headers['X-HTTP-Method-Override'] = 'GET' data_dict = dict() if hosts: @@ -233,31 +235,32 @@ class InventoryModule(BaseInventoryPlugin, Constructable): """Convert Icinga2 API data to JSON format for Ansible""" groups_dict = {"_meta": {"hostvars": {}}} for entry in json_data: - host_attrs = entry['attrs'] + host_attrs = make_unsafe(entry['attrs']) if self.inventory_attr == "name": - host_name = entry.get('name') + host_name = make_unsafe(entry.get('name')) if self.inventory_attr == "address": # When looking for address for inventory, if missing fallback to object name if host_attrs.get('address', '') != '': - host_name = host_attrs.get('address') + host_name = make_unsafe(host_attrs.get('address')) else: - host_name = entry.get('name') + host_name = make_unsafe(entry.get('name')) if self.inventory_attr == "display_name": host_name = host_attrs.get('display_name') if host_attrs['state'] == 0: host_attrs['state'] = 'on' else: host_attrs['state'] = 'off' - host_groups = host_attrs.get('groups') self.inventory.add_host(host_name) - for group in host_groups: - if group not in self.inventory.groups.keys(): - self.inventory.add_group(group) - self.inventory.add_child(group, host_name) + if self.group_by_hostgroups: + host_groups = host_attrs.get('groups') + for group in host_groups: + if group not in self.inventory.groups.keys(): + self.inventory.add_group(group) + self.inventory.add_child(group, host_name) # If the address attribute is populated, override ansible_host with the value if host_attrs.get('address') != '': self.inventory.set_variable(host_name, 'ansible_host', host_attrs.get('address')) - self.inventory.set_variable(host_name, 'hostname', entry.get('name')) + 
self.inventory.set_variable(host_name, 'hostname', make_unsafe(entry.get('name'))) self.inventory.set_variable(host_name, 'display_name', host_attrs.get('display_name')) self.inventory.set_variable(host_name, 'state', host_attrs['state']) @@ -277,12 +280,23 @@ class InventoryModule(BaseInventoryPlugin, Constructable): self._read_config_data(path) # Store the options from the YAML file - self.icinga2_url = self.get_option('url').rstrip('/') + '/v1' + self.icinga2_url = self.get_option('url') self.icinga2_user = self.get_option('user') self.icinga2_password = self.get_option('password') self.ssl_verify = self.get_option('validate_certs') self.host_filter = self.get_option('host_filter') self.inventory_attr = self.get_option('inventory_attr') + self.group_by_hostgroups = self.get_option('group_by_hostgroups') + + if self.templar.is_template(self.icinga2_url): + self.icinga2_url = self.templar.template(variable=self.icinga2_url) + if self.templar.is_template(self.icinga2_user): + self.icinga2_user = self.templar.template(variable=self.icinga2_user) + if self.templar.is_template(self.icinga2_password): + self.icinga2_password = self.templar.template(variable=self.icinga2_password) + + self.icinga2_url = f"{self.icinga2_url.rstrip('/')}/v1" + # Not currently enabled # self.cache_key = self.get_cache_key(path) # self.use_cache = cache and self.get_option('cache') diff --git a/plugins/inventory/iocage.py b/plugins/inventory/iocage.py new file mode 100644 index 0000000000..9d4cef4a03 --- /dev/null +++ b/plugins/inventory/iocage.py @@ -0,0 +1,418 @@ + +# Copyright (c) 2024 Vladimir Botka +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: iocage +short_description: C(iocage) inventory source +version_added: 10.2.0 +author: + - Vladimir Botka (@vbotka) +requirements: + - iocage >= 1.8 +description: + - Get inventory hosts 
from the C(iocage) jail manager running on O(host). + - By default, O(host) is V(localhost). If O(host) is not V(localhost) it is expected that the user running Ansible on the + controller can connect to the O(host) account O(user) with SSH non-interactively and execute the command C(iocage list). + - Uses a configuration file as an inventory source, it must end in C(.iocage.yml) or C(.iocage.yaml). +extends_documentation_fragment: + - ansible.builtin.constructed + - ansible.builtin.inventory_cache +options: + plugin: + description: + - The name of this plugin, it should always be set to V(community.general.iocage) for this plugin to recognize it as + its own. + required: true + choices: ['community.general.iocage'] + type: str + host: + description: The IP/hostname of the C(iocage) host. + type: str + default: localhost + user: + description: + - C(iocage) user. It is expected that the O(user) is able to connect to the O(host) with SSH and execute the command + C(iocage list). This option is not required if O(host=localhost). + type: str + sudo: + description: + - Enable execution as root. + - This requires passwordless sudo of the command C(iocage list*). + type: bool + default: false + version_added: 10.3.0 + sudo_preserve_env: + description: + - Preserve environment if O(sudo) is enabled. + - This requires C(SETENV) sudoers tag. + type: bool + default: false + version_added: 10.3.0 + get_properties: + description: + - Get jails' properties. Creates dictionary C(iocage_properties) for each added host. + type: bool + default: false + env: + description: + - O(user)'s environment on O(host). + - Enable O(sudo_preserve_env) if O(sudo) is enabled. + type: dict + default: {} + hooks_results: + description: + - List of paths to the files in a jail. + - Content of the files is stored in the items of the list C(iocage_hooks). + - If a file is not available the item keeps the dash character C(-). 
+ - The variable C(iocage_hooks) is not created if O(hooks_results) is empty. + type: list + elements: path + version_added: 10.4.0 + inventory_hostname_tag: + description: + - The name of the tag in the C(iocage properties notes) that contains the jails alias. + - By default, the C(iocage list -l) column C(NAME) is used to name the jail. + - This option requires the notes format C("t1=v1 t2=v2 ..."). + - The option O(get_properties) must be enabled. + type: str + version_added: 11.0.0 + inventory_hostname_required: + description: + - If enabled, the tag declared in O(inventory_hostname_tag) is required. + type: bool + default: false + version_added: 11.0.0 +notes: + - You might want to test the command C(ssh user@host iocage list -l) on the controller before using this inventory plugin + with O(user) specified and with O(host) other than V(localhost). + - If you run this inventory plugin on V(localhost) C(ssh) is not used. In this case, test the command C(iocage list -l). + - This inventory plugin creates variables C(iocage_*) for each added host. + - The values of these variables are collected from the output of the command C(iocage list -l). + - The names of these variables correspond to the output columns. + - The column C(NAME) is used to name the added host. + - The option O(hooks_results) expects the C(poolname) of a jail is mounted to C(/poolname). For example, if you activate + the pool C(iocage) this plugin expects to find the O(hooks_results) items in the path C(/iocage/iocage/jails//root). + If you mount the C(poolname) to a different path the easiest remedy is to create a symlink. 
+""" + +EXAMPLES = r""" +--- +# file name must end with iocage.yaml or iocage.yml +plugin: community.general.iocage +host: 10.1.0.73 +user: admin + +--- +# user is not required if iocage is running on localhost (default) +plugin: community.general.iocage + +--- +# run cryptography without legacy algorithms +plugin: community.general.iocage +host: 10.1.0.73 +user: admin +env: + CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1 + +--- +# execute as root +# sudoers example 'admin ALL=(ALL) NOPASSWD:SETENV: /usr/local/bin/iocage list*' +plugin: community.general.iocage +host: 10.1.0.73 +user: admin +sudo: true +sudo_preserve_env: true +env: + CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1 + +--- +# enable cache +plugin: community.general.iocage +host: 10.1.0.73 +user: admin +env: + CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1 +cache: true + +--- +# see inventory plugin ansible.builtin.constructed +plugin: community.general.iocage +host: 10.1.0.73 +user: admin +env: + CRYPTOGRAPHY_OPENSSL_NO_LEGACY: 1 +cache: true +strict: false +compose: + ansible_host: iocage_ip4 + release: iocage_release | split('-') | first +groups: + test: inventory_hostname.startswith('test') +keyed_groups: + - prefix: distro + key: iocage_release + - prefix: state + key: iocage_state + +--- +# Read the file /var/db/dhclient-hook.address.epair0b in the jails and use it as ansible_host +plugin: community.general.iocage +host: 10.1.0.73 +user: admin +hooks_results: + - /var/db/dhclient-hook.address.epair0b +compose: + ansible_host: iocage_hooks.0 +groups: + test: inventory_hostname.startswith('test') +""" + +import re +import os +from subprocess import Popen, PIPE + +from ansible.errors import AnsibleError, AnsibleParserError +from ansible.module_utils.common.text.converters import to_native, to_text +from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable +from ansible.utils.display import Display + +display = Display() + + +def _parse_ip4(ip4): + ''' Return dictionary iocage_ip4_dict. 
default = {ip4: [], msg: ''}. + If item matches ifc|IP or ifc|CIDR parse ifc, ip, and mask. + Otherwise, append item to msg. + ''' + + iocage_ip4_dict = {} + iocage_ip4_dict['ip4'] = [] + iocage_ip4_dict['msg'] = '' + + items = ip4.split(',') + for item in items: + if re.match('^\\w+\\|(?:\\d{1,3}\\.){3}\\d{1,3}.*$', item): + i = re.split('\\||/', item) + if len(i) == 3: + iocage_ip4_dict['ip4'].append({'ifc': i[0], 'ip': i[1], 'mask': i[2]}) + else: + iocage_ip4_dict['ip4'].append({'ifc': i[0], 'ip': i[1], 'mask': '-'}) + else: + iocage_ip4_dict['msg'] += item + + return iocage_ip4_dict + + +class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): + ''' Host inventory parser for ansible using iocage as source. ''' + + NAME = 'community.general.iocage' + IOCAGE = '/usr/local/bin/iocage' + + def __init__(self): + super(InventoryModule, self).__init__() + + def verify_file(self, path): + valid = False + if super(InventoryModule, self).verify_file(path): + if path.endswith(('iocage.yaml', 'iocage.yml')): + valid = True + else: + self.display.vvv('Skipping due to inventory source not ending in "iocage.yaml" nor "iocage.yml"') + return valid + + def parse(self, inventory, loader, path, cache=True): + super(InventoryModule, self).parse(inventory, loader, path) + + self._read_config_data(path) + cache_key = self.get_cache_key(path) + + user_cache_setting = self.get_option('cache') + attempt_to_read_cache = user_cache_setting and cache + cache_needs_update = user_cache_setting and not cache + + if attempt_to_read_cache: + try: + results = self._cache[cache_key] + except KeyError: + cache_needs_update = True + if not attempt_to_read_cache or cache_needs_update: + results = self.get_inventory(path) + if cache_needs_update: + self._cache[cache_key] = results + + self.populate(results) + + def get_inventory(self, path): + host = self.get_option('host') + sudo = self.get_option('sudo') + sudo_preserve_env = self.get_option('sudo_preserve_env') + env = 
self.get_option('env') + get_properties = self.get_option('get_properties') + hooks_results = self.get_option('hooks_results') + inventory_hostname_tag = self.get_option('inventory_hostname_tag') + inventory_hostname_required = self.get_option('inventory_hostname_required') + + cmd = [] + my_env = os.environ.copy() + if host == 'localhost': + my_env.update({str(k): str(v) for k, v in env.items()}) + else: + user = self.get_option('user') + cmd.append("ssh") + cmd.append(f"{user}@{host}") + cmd.extend([f"{k}={v}" for k, v in env.items()]) + + cmd_list = cmd.copy() + if sudo: + cmd_list.append('sudo') + if sudo_preserve_env: + cmd_list.append('--preserve-env') + cmd_list.append(self.IOCAGE) + cmd_list.append('list') + cmd_list.append('--long') + try: + p = Popen(cmd_list, stdout=PIPE, stderr=PIPE, env=my_env) + stdout, stderr = p.communicate() + if p.returncode != 0: + raise AnsibleError(f'Failed to run cmd={cmd_list}, rc={p.returncode}, stderr={to_native(stderr)}') + + try: + t_stdout = to_text(stdout, errors='surrogate_or_strict') + except UnicodeError as e: + raise AnsibleError(f'Invalid (non unicode) input returned: {e}') from e + + except Exception as e: + raise AnsibleParserError(f'Failed to parse {to_native(path)}: {e}') from e + + results = {'_meta': {'hostvars': {}}} + self.get_jails(t_stdout, results) + + if get_properties: + for hostname, host_vars in results['_meta']['hostvars'].items(): + cmd_get_properties = cmd.copy() + cmd_get_properties.append(self.IOCAGE) + cmd_get_properties.append("get") + cmd_get_properties.append("--all") + cmd_get_properties.append(f"{hostname}") + try: + p = Popen(cmd_get_properties, stdout=PIPE, stderr=PIPE, env=my_env) + stdout, stderr = p.communicate() + if p.returncode != 0: + raise AnsibleError( + f'Failed to run cmd={cmd_get_properties}, rc={p.returncode}, stderr={to_native(stderr)}') + + try: + t_stdout = to_text(stdout, errors='surrogate_or_strict') + except UnicodeError as e: + raise AnsibleError(f'Invalid (non 
unicode) input returned: {e}') from e + + except Exception as e: + raise AnsibleError(f'Failed to get properties: {e}') from e + + self.get_properties(t_stdout, results, hostname) + + if hooks_results: + cmd_get_pool = cmd.copy() + cmd_get_pool.append(self.IOCAGE) + cmd_get_pool.append('get') + cmd_get_pool.append('--pool') + try: + p = Popen(cmd_get_pool, stdout=PIPE, stderr=PIPE, env=my_env) + stdout, stderr = p.communicate() + if p.returncode != 0: + raise AnsibleError( + f'Failed to run cmd={cmd_get_pool}, rc={p.returncode}, stderr={to_native(stderr)}') + try: + iocage_pool = to_text(stdout, errors='surrogate_or_strict').strip() + except UnicodeError as e: + raise AnsibleError(f'Invalid (non unicode) input returned: {e}') from e + except Exception as e: + raise AnsibleError(f'Failed to get pool: {e}') from e + + for hostname, host_vars in results['_meta']['hostvars'].items(): + iocage_hooks = [] + for hook in hooks_results: + path = f"/{iocage_pool}/iocage/jails/{hostname}/root{hook}" + cmd_cat_hook = cmd.copy() + cmd_cat_hook.append('cat') + cmd_cat_hook.append(path) + try: + p = Popen(cmd_cat_hook, stdout=PIPE, stderr=PIPE, env=my_env) + stdout, stderr = p.communicate() + if p.returncode != 0: + iocage_hooks.append('-') + continue + + try: + iocage_hook = to_text(stdout, errors='surrogate_or_strict').strip() + except UnicodeError as e: + raise AnsibleError(f'Invalid (non unicode) input returned: {e}') from e + + except Exception: + iocage_hooks.append('-') + else: + iocage_hooks.append(iocage_hook) + + results['_meta']['hostvars'][hostname]['iocage_hooks'] = iocage_hooks + + # Optionally, get the jails names from the properties notes. + # Requires the notes format "t1=v1 t2=v2 ..." + if inventory_hostname_tag: + if not get_properties: + raise AnsibleError('Jail properties are needed to use inventory_hostname_tag. 
Enable get_properties') + update = {} + for hostname, host_vars in results['_meta']['hostvars'].items(): + tags = dict(tag.split('=', 1) for tag in host_vars['iocage_properties']['notes'].split() if '=' in tag) + if inventory_hostname_tag in tags: + update[hostname] = tags[inventory_hostname_tag] + elif inventory_hostname_required: + raise AnsibleError(f'Mandatory tag {inventory_hostname_tag!r} is missing in the properties notes.') + for hostname, alias in update.items(): + results['_meta']['hostvars'][alias] = results['_meta']['hostvars'].pop(hostname) + + return results + + def get_jails(self, t_stdout, results): + lines = t_stdout.splitlines() + if len(lines) < 5: + return + indices = [i for i, val in enumerate(lines[1]) if val == '|'] + for line in lines[3::2]: + jail = [line[i + 1:j].strip() for i, j in zip(indices[:-1], indices[1:])] + iocage_name = jail[1] + iocage_ip4_dict = _parse_ip4(jail[6]) + if iocage_ip4_dict['ip4']: + iocage_ip4 = ','.join([d['ip'] for d in iocage_ip4_dict['ip4']]) + else: + iocage_ip4 = '-' + results['_meta']['hostvars'][iocage_name] = {} + results['_meta']['hostvars'][iocage_name]['iocage_jid'] = jail[0] + results['_meta']['hostvars'][iocage_name]['iocage_boot'] = jail[2] + results['_meta']['hostvars'][iocage_name]['iocage_state'] = jail[3] + results['_meta']['hostvars'][iocage_name]['iocage_type'] = jail[4] + results['_meta']['hostvars'][iocage_name]['iocage_release'] = jail[5] + results['_meta']['hostvars'][iocage_name]['iocage_ip4_dict'] = iocage_ip4_dict + results['_meta']['hostvars'][iocage_name]['iocage_ip4'] = iocage_ip4 + results['_meta']['hostvars'][iocage_name]['iocage_ip6'] = jail[7] + results['_meta']['hostvars'][iocage_name]['iocage_template'] = jail[8] + results['_meta']['hostvars'][iocage_name]['iocage_basejail'] = jail[9] + + def get_properties(self, t_stdout, results, hostname): + properties = dict(x.split(':', 1) for x in t_stdout.splitlines()) + results['_meta']['hostvars'][hostname]['iocage_properties'] = 
properties + + def populate(self, results): + strict = self.get_option('strict') + + for hostname, host_vars in results['_meta']['hostvars'].items(): + self.inventory.add_host(hostname, group='all') + for var, value in host_vars.items(): + self.inventory.set_variable(hostname, var, value) + self._set_composite_vars(self.get_option('compose'), host_vars, hostname, strict=True) + self._add_host_to_composed_groups(self.get_option('groups'), host_vars, hostname, strict=strict) + self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host_vars, hostname, strict=strict) diff --git a/plugins/inventory/linode.py b/plugins/inventory/linode.py index ea87a9a58e..fc039b03b5 100644 --- a/plugins/inventory/linode.py +++ b/plugins/inventory/linode.py @@ -1,92 +1,93 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' - name: linode - author: - - Luke Murphy (@decentral1se) - short_description: Ansible dynamic inventory plugin for Linode. - requirements: - - python >= 2.7 - - linode_api4 >= 2.0.0 - description: - - Reads inventories from the Linode API v4. - - Uses a YAML configuration file that ends with linode.(yml|yaml). - - Linode labels are used by default as the hostnames. - - The default inventory groups are built from groups (deprecated by - Linode) and not tags. - extends_documentation_fragment: - - constructed - - inventory_cache - options: - cache: - version_added: 4.5.0 - cache_plugin: - version_added: 4.5.0 - cache_timeout: - version_added: 4.5.0 - cache_connection: - version_added: 4.5.0 - cache_prefix: - version_added: 4.5.0 - plugin: - description: Marks this as an instance of the 'linode' plugin. 
- required: true - choices: ['linode', 'community.general.linode'] - ip_style: - description: Populate hostvars with all information available from the Linode APIv4. - type: string - default: plain - choices: - - plain - - api - version_added: 3.6.0 - access_token: - description: The Linode account personal access token. - required: true - env: - - name: LINODE_ACCESS_TOKEN - regions: - description: Populate inventory with instances in this region. - default: [] - type: list - elements: string - tags: - description: Populate inventory only with instances which have at least one of the tags listed here. - default: [] - type: list - elements: string - version_added: 2.0.0 - types: - description: Populate inventory with instances with this type. - default: [] - type: list - elements: string - strict: - version_added: 2.0.0 - compose: - version_added: 2.0.0 - groups: - version_added: 2.0.0 - keyed_groups: - version_added: 2.0.0 -''' +DOCUMENTATION = r""" +name: linode +author: + - Luke Murphy (@decentral1se) +short_description: Ansible dynamic inventory plugin for Linode +requirements: + - linode_api4 >= 2.0.0 +description: + - Reads inventories from the Linode API v4. + - Uses a YAML configuration file that ends with linode.(yml|yaml). + - Linode labels are used by default as the hostnames. + - The default inventory groups are built from groups (deprecated by Linode) and not tags. +extends_documentation_fragment: + - constructed + - inventory_cache +options: + cache: + version_added: 4.5.0 + cache_plugin: + version_added: 4.5.0 + cache_timeout: + version_added: 4.5.0 + cache_connection: + version_added: 4.5.0 + cache_prefix: + version_added: 4.5.0 + plugin: + description: Marks this as an instance of the 'linode' plugin. + type: string + required: true + choices: ['linode', 'community.general.linode'] + ip_style: + description: Populate hostvars with all information available from the Linode APIv4. 
+ type: string + default: plain + choices: + - plain + - api + version_added: 3.6.0 + access_token: + description: The Linode account personal access token. + type: string + required: true + env: + - name: LINODE_ACCESS_TOKEN + regions: + description: Populate inventory with instances in this region. + default: [] + type: list + elements: string + tags: + description: Populate inventory only with instances which have at least one of the tags listed here. + default: [] + type: list + elements: string + version_added: 2.0.0 + types: + description: Populate inventory with instances with this type. + default: [] + type: list + elements: string + strict: + version_added: 2.0.0 + compose: + version_added: 2.0.0 + groups: + version_added: 2.0.0 + keyed_groups: + version_added: 2.0.0 +""" -EXAMPLES = r''' +EXAMPLES = r""" +--- # Minimal example. `LINODE_ACCESS_TOKEN` is exposed in environment. plugin: community.general.linode +--- # You can use Jinja to template the access token. plugin: community.general.linode access_token: "{{ lookup('ini', 'token', section='your_username', file='~/.config/linode-cli') }}" # For older Ansible versions, you need to write this as: # access_token: "{{ lookup('ini', 'token section=your_username file=~/.config/linode-cli') }}" +--- # Example with regions, types, groups and access token plugin: community.general.linode access_token: foobar @@ -95,6 +96,7 @@ regions: types: - g5-standard-2 +--- # Example with keyed_groups, groups, and compose plugin: community.general.linode access_token: foobar @@ -113,20 +115,20 @@ compose: ansible_ssh_host: ipv4[0] ansible_port: 2222 +--- # Example where control traffic limited to internal network plugin: community.general.linode access_token: foobar ip_style: api compose: ansible_host: "ipv4 | community.general.json_query('[?public==`false`].address') | first" -''' +""" -import os - -from ansible.errors import AnsibleError, AnsibleParserError -from ansible.module_utils.six import string_types +from 
ansible.errors import AnsibleError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe + try: from linode_api4 import LinodeClient @@ -146,7 +148,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): access_token = self.get_option('access_token') if self.templar.is_template(access_token): - access_token = self.templar.template(variable=access_token, disable_lookups=False) + access_token = self.templar.template(variable=access_token) if access_token is None: raise AnsibleError(( @@ -161,17 +163,11 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): try: self.instances = self.client.linode.instances() except LinodeApiError as exception: - raise AnsibleError('Linode client raised: %s' % exception) + raise AnsibleError(f'Linode client raised: {exception}') def _add_groups(self): """Add Linode instance groups to the dynamic inventory.""" - self.linode_groups = set( - filter(None, [ - instance.group - for instance - in self.instances - ]) - ) + self.linode_groups = {instance.group for instance in self.instances if instance.group} for linode_group in self.linode_groups: self.inventory.add_group(linode_group) @@ -202,20 +198,21 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): def _add_instances_to_groups(self): """Add instance names to their dynamic inventory groups.""" for instance in self.instances: - self.inventory.add_host(instance.label, group=instance.group) + self.inventory.add_host(make_unsafe(instance.label), group=instance.group) def _add_hostvars_for_instances(self): """Add hostvars for instances in the dynamic inventory.""" ip_style = self.get_option('ip_style') for instance in self.instances: hostvars = instance._raw_json + hostname = make_unsafe(instance.label) for hostvar_key in hostvars: if ip_style == 'api' and hostvar_key in ['ipv4', 'ipv6']: continue 
self.inventory.set_variable( - instance.label, + hostname, hostvar_key, - hostvars[hostvar_key] + make_unsafe(hostvars[hostvar_key]) ) if ip_style == 'api': ips = instance.ips.ipv4.public + instance.ips.ipv4.private @@ -224,9 +221,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): for ip_type in set(ip.type for ip in ips): self.inventory.set_variable( - instance.label, + hostname, ip_type, - self._ip_data([ip for ip in ips if ip.type == ip_type]) + make_unsafe(self._ip_data([ip for ip in ips if ip.type == ip_type])) ) def _ip_data(self, ip_list): @@ -257,30 +254,44 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): self._add_instances_to_groups() self._add_hostvars_for_instances() for instance in self.instances: - variables = self.inventory.get_host(instance.label).get_vars() + hostname = make_unsafe(instance.label) + variables = self.inventory.get_host(hostname).get_vars() self._add_host_to_composed_groups( self.get_option('groups'), variables, - instance.label, + hostname, strict=strict) self._add_host_to_keyed_groups( self.get_option('keyed_groups'), variables, - instance.label, + hostname, strict=strict) self._set_composite_vars( self.get_option('compose'), variables, - instance.label, + hostname, strict=strict) def verify_file(self, path): - """Verify the Linode configuration file.""" + """Verify the Linode configuration file. 
+ + Return true/false if the config-file is valid for this plugin + + Args: + str(path): path to the config + Kwargs: + None + Raises: + None + Returns: + bool(valid): is valid config file""" + valid = False if super(InventoryModule, self).verify_file(path): - endings = ('linode.yaml', 'linode.yml') - if any((path.endswith(ending) for ending in endings)): - return True - return False + if path.endswith(("linode.yaml", "linode.yml")): + valid = True + else: + self.display.vvv('Inventory source not ending in "linode.yaml" or "linode.yml"') + return valid def parse(self, inventory, loader, path, cache=True): """Dynamically parse Linode the cloud inventory.""" diff --git a/plugins/inventory/lxd.py b/plugins/inventory/lxd.py index 2e37de70c1..492d12a21b 100644 --- a/plugins/inventory/lxd.py +++ b/plugins/inventory/lxd.py @@ -1,107 +1,123 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2021, Frank Dornheim # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' - name: lxd - short_description: Returns Ansible inventory from lxd host +DOCUMENTATION = r""" +name: lxd +short_description: Returns Ansible inventory from lxd host +description: + - Get inventory from the lxd. + - Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'. +version_added: "3.0.0" +author: "Frank Dornheim (@conloos)" +requirements: + - ipaddress + - lxd >= 4.0 +options: + plugin: + description: Token that ensures this is a source file for the 'lxd' plugin. + type: string + required: true + choices: ['community.general.lxd'] + url: description: - - Get inventory from the lxd. - - Uses a YAML configuration file that ends with 'lxd.(yml|yaml)'. 
- version_added: "3.0.0" - author: "Frank Dornheim (@conloos)" - requirements: - - ipaddress - - lxd >= 4.0 - options: - plugin: - description: Token that ensures this is a source file for the 'lxd' plugin. - required: true - choices: [ 'community.general.lxd' ] - url: - description: - - The unix domain socket path or the https URL for the lxd server. - - Sockets in filesystem have to start with C(unix:). - - Mostly C(unix:/var/lib/lxd/unix.socket) or C(unix:/var/snap/lxd/common/lxd/unix.socket). - default: unix:/var/snap/lxd/common/lxd/unix.socket - type: str - client_key: - description: - - The client certificate key file path. - aliases: [ key_file ] - default: $HOME/.config/lxc/client.key - type: path - client_cert: - description: - - The client certificate file path. - aliases: [ cert_file ] - default: $HOME/.config/lxc/client.crt - type: path - trust_password: - description: - - The client trusted password. - - You need to set this password on the lxd server before - running this module using the following command - C(lxc config set core.trust_password ) - See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/). - - If I(trust_password) is set, this module send a request for authentication before sending any requests. - type: str - state: - description: Filter the instance according to the current status. - type: str - default: none - choices: [ 'STOPPED', 'STARTING', 'RUNNING', 'none' ] - project: - description: Filter the instance according to the given project. - type: str - default: default - version_added: 6.2.0 - type_filter: - description: - - Filter the instances by type C(virtual-machine), C(container) or C(both). - - The first version of the inventory only supported containers. - type: str - default: container - choices: [ 'virtual-machine', 'container', 'both' ] - version_added: 4.2.0 - prefered_instance_network_interface: - description: - - If an instance has multiple network interfaces, select which one is the prefered as pattern. 
- - Combined with the first number that can be found e.g. 'eth' + 0. - - The option has been renamed from I(prefered_container_network_interface) to I(prefered_instance_network_interface) in community.general 3.8.0. - The old name still works as an alias. - type: str - default: eth - aliases: - - prefered_container_network_interface - prefered_instance_network_family: - description: - - If an instance has multiple network interfaces, which one is the prefered by family. - - Specify C(inet) for IPv4 and C(inet6) for IPv6. - type: str - default: inet - choices: [ 'inet', 'inet6' ] - groupby: - description: - - Create groups by the following keywords C(location), C(network_range), C(os), C(pattern), C(profile), C(release), C(type), C(vlanid). - - See example for syntax. - type: dict -''' + - The unix domain socket path or the https URL for the lxd server. + - Sockets in filesystem have to start with C(unix:). + - Mostly C(unix:/var/lib/lxd/unix.socket) or C(unix:/var/snap/lxd/common/lxd/unix.socket). + type: string + default: unix:/var/snap/lxd/common/lxd/unix.socket + client_key: + description: + - The client certificate key file path. + aliases: [key_file] + default: $HOME/.config/lxc/client.key + type: path + client_cert: + description: + - The client certificate file path. + aliases: [cert_file] + default: $HOME/.config/lxc/client.crt + type: path + server_cert: + description: + - The server certificate file path. + type: path + version_added: 8.0.0 + server_check_hostname: + description: + - This option controls if the server's hostname is checked as part of the HTTPS connection verification. This can be + useful to disable, if for example, the server certificate provided (see O(server_cert) option) does not cover a name + matching the one used to communicate with the server. Such mismatch is common as LXD generates self-signed server + certificates by default. 
+ type: bool + default: true + version_added: 8.0.0 + trust_password: + description: + - The client trusted password. + - You need to set this password on the lxd server before running this module using the following command C(lxc config + set core.trust_password ) See + U(https://documentation.ubuntu.com/lxd/en/latest/authentication/#adding-client-certificates-using-a-trust-password). + - If O(trust_password) is set, this module send a request for authentication before sending any requests. + type: str + state: + description: Filter the instance according to the current status. + type: str + default: none + choices: ['STOPPED', 'STARTING', 'RUNNING', 'none'] + project: + description: Filter the instance according to the given project. + type: str + default: default + version_added: 6.2.0 + type_filter: + description: + - Filter the instances by type V(virtual-machine), V(container) or V(both). + - The first version of the inventory only supported containers. + type: str + default: container + choices: ['virtual-machine', 'container', 'both'] + version_added: 4.2.0 + prefered_instance_network_interface: + description: + - If an instance has multiple network interfaces, select which one is the preferred as pattern. + - Combined with the first number that can be found, for example C(eth) + C(0). + - The option has been renamed from O(prefered_container_network_interface) to O(prefered_instance_network_interface) + in community.general 3.8.0. The old name still works as an alias. + type: str + default: eth + aliases: + - prefered_container_network_interface + prefered_instance_network_family: + description: + - If an instance has multiple network interfaces, which one is the preferred by family. + - Specify V(inet) for IPv4 and V(inet6) for IPv6. + type: str + default: inet + choices: ['inet', 'inet6'] + groupby: + description: + - Create groups by the following keywords C(location), C(network_range), C(os), C(pattern), C(profile), C(release), + C(type), C(vlanid). 
+ - See example for syntax. + type: dict +""" -EXAMPLES = ''' +EXAMPLES = r""" +--- # simple lxd.yml plugin: community.general.lxd url: unix:/var/snap/lxd/common/lxd/unix.socket +--- # simple lxd.yml including filter plugin: community.general.lxd url: unix:/var/snap/lxd/common/lxd/unix.socket state: RUNNING +--- # simple lxd.yml including virtual machines and containers plugin: community.general.lxd url: unix:/var/snap/lxd/common/lxd/unix.socket @@ -148,21 +164,20 @@ groupby: projectInternals: type: project attribute: internals -''' +""" -import binascii import json import re import time import os -import socket +from urllib.parse import urlencode + from ansible.plugins.inventory import BaseInventoryPlugin from ansible.module_utils.common.text.converters import to_native, to_text from ansible.module_utils.common.dict_transformations import dict_merge -from ansible.module_utils.six import raise_from from ansible.errors import AnsibleError, AnsibleParserError -from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible_collections.community.general.plugins.module_utils.lxd import LXDClient, LXDClientException +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe try: import ipaddress @@ -197,7 +212,7 @@ class InventoryModule(BaseInventoryPlugin): with open(path, 'r') as json_file: return json.load(json_file) except (IOError, json.decoder.JSONDecodeError) as err: - raise AnsibleParserError('Could not load the test data from {0}: {1}'.format(to_native(path), to_native(err))) + raise AnsibleParserError(f'Could not load the test data from {to_native(path)}: {err}') def save_json_data(self, path, file_name=None): """save data as json @@ -227,7 +242,7 @@ class InventoryModule(BaseInventoryPlugin): with open(os.path.abspath(os.path.join(cwd, *path)), 'w') as json_file: json.dump(self.data, json_file) except IOError as err: - raise AnsibleParserError('Could not save data: {0}'.format(to_native(err))) + raise 
AnsibleParserError(f'Could not save data: {err}') def verify_file(self, path): """Check the config @@ -267,7 +282,7 @@ class InventoryModule(BaseInventoryPlugin): if not isinstance(url, str): return False if not url.startswith(('unix:', 'https:')): - raise AnsibleError('URL is malformed: {0}'.format(to_native(url))) + raise AnsibleError(f'URL is malformed: {url}') return True def _connect_to_socket(self): @@ -288,11 +303,11 @@ class InventoryModule(BaseInventoryPlugin): urls = (url for url in url_list if self.validate_url(url)) for url in urls: try: - socket_connection = LXDClient(url, self.client_key, self.client_cert, self.debug) + socket_connection = LXDClient(url, self.client_key, self.client_cert, self.debug, self.server_cert, self.server_check_hostname) return socket_connection except LXDClientException as err: error_storage[url] = err - raise AnsibleError('No connection to the socket: {0}'.format(to_native(error_storage))) + raise AnsibleError(f'No connection to the socket: {error_storage}') def _get_networks(self): """Get Networknames @@ -341,7 +356,7 @@ class InventoryModule(BaseInventoryPlugin): # } url = '/1.0/instances' if self.project: - url = url + '?{0}'.format(urlencode(dict(project=self.project))) + url = f"{url}?{urlencode(dict(project=self.project))}" instances = self.socket.do('GET', url) @@ -361,7 +376,7 @@ class InventoryModule(BaseInventoryPlugin): Kwargs: None Source: - https://github.com/lxc/lxd/blob/master/doc/rest-api.md + https://documentation.ubuntu.com/lxd/en/latest/rest-api/ Raises: None Returns: @@ -369,16 +384,16 @@ class InventoryModule(BaseInventoryPlugin): config = {} if isinstance(branch, (tuple, list)): config[name] = {branch[1]: self.socket.do( - 'GET', '/1.0/{0}/{1}/{2}?{3}'.format(to_native(branch[0]), to_native(name), to_native(branch[1]), urlencode(dict(project=self.project))))} + 'GET', f'/1.0/{to_native(branch[0])}/{to_native(name)}/{to_native(branch[1])}?{urlencode(dict(project=self.project))}')} else: config[name] = 
{branch: self.socket.do( - 'GET', '/1.0/{0}/{1}?{2}'.format(to_native(branch), to_native(name), urlencode(dict(project=self.project))))} + 'GET', f'/1.0/{to_native(branch)}/{to_native(name)}?{urlencode(dict(project=self.project))}')} return config def get_instance_data(self, names): """Create Inventory of the instance - Iterate through the different branches of the instances and collect Informations. + Iterate through the different branches of the instances and collect Information. Args: list(names): List of instance names @@ -400,7 +415,7 @@ class InventoryModule(BaseInventoryPlugin): def get_network_data(self, names): """Create Inventory of the instance - Iterate through the different branches of the instances and collect Informations. + Iterate through the different branches of the instances and collect Information. Args: list(names): List of instance names @@ -435,7 +450,7 @@ class InventoryModule(BaseInventoryPlugin): None Returns: dict(network_configuration): network config""" - instance_network_interfaces = self._get_data_entry('instances/{0}/state/metadata/network'.format(instance_name)) + instance_network_interfaces = self._get_data_entry(f'instances/{instance_name}/state/metadata/network') network_configuration = None if instance_network_interfaces: network_configuration = {} @@ -448,24 +463,24 @@ class InventoryModule(BaseInventoryPlugin): address_set['family'] = address.get('family') address_set['address'] = address.get('address') address_set['netmask'] = address.get('netmask') - address_set['combined'] = address.get('address') + '/' + address.get('netmask') + address_set['combined'] = f"{address.get('address')}/{address.get('netmask')}" network_configuration[interface_name].append(address_set) return network_configuration def get_prefered_instance_network_interface(self, instance_name): - """Helper to get the prefered interface of thr instance + """Helper to get the preferred interface of thr instance - Helper to get the prefered interface provide by 
neme pattern from 'prefered_instance_network_interface'. + Helper to get the preferred interface provide by neme pattern from 'prefered_instance_network_interface'. Args: - str(containe_name): name of instance + str(instance_name): name of instance Kwargs: None Raises: None Returns: str(prefered_interface): None or interface name""" - instance_network_interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name)) + instance_network_interfaces = self._get_data_entry(f'inventory/{instance_name}/network_interfaces') prefered_interface = None # init if instance_network_interfaces: # instance have network interfaces # generator if interfaces which start with the desired pattern @@ -483,7 +498,7 @@ class InventoryModule(BaseInventoryPlugin): Helper to get the VLAN_ID from the instance Args: - str(containe_name): name of instance + str(instance_name): name of instance Kwargs: None Raises: @@ -502,7 +517,7 @@ class InventoryModule(BaseInventoryPlugin): # "network":"lxdbr0", # "type":"nic"}, vlan_ids = {} - devices = self._get_data_entry('instances/{0}/instances/metadata/expanded_devices'.format(to_native(instance_name))) + devices = self._get_data_entry(f'instances/{to_native(instance_name)}/instances/metadata/expanded_devices') for device in devices: if 'network' in devices[device]: if devices[device]['network'] in network_vlans: @@ -565,7 +580,7 @@ class InventoryModule(BaseInventoryPlugin): else: path[instance_name][key] = value except KeyError as err: - raise AnsibleParserError("Unable to store Informations: {0}".format(to_native(err))) + raise AnsibleParserError(f"Unable to store Information: {err}") def extract_information_from_instance_configs(self): """Process configuration information @@ -586,24 +601,24 @@ class InventoryModule(BaseInventoryPlugin): for instance_name in self.data['instances']: self._set_data_entry(instance_name, 'os', self._get_data_entry( - 'instances/{0}/instances/metadata/config/image.os'.format(instance_name))) + 
f'instances/{instance_name}/instances/metadata/config/image.os')) self._set_data_entry(instance_name, 'release', self._get_data_entry( - 'instances/{0}/instances/metadata/config/image.release'.format(instance_name))) + f'instances/{instance_name}/instances/metadata/config/image.release')) self._set_data_entry(instance_name, 'version', self._get_data_entry( - 'instances/{0}/instances/metadata/config/image.version'.format(instance_name))) + f'instances/{instance_name}/instances/metadata/config/image.version')) self._set_data_entry(instance_name, 'profile', self._get_data_entry( - 'instances/{0}/instances/metadata/profiles'.format(instance_name))) + f'instances/{instance_name}/instances/metadata/profiles')) self._set_data_entry(instance_name, 'location', self._get_data_entry( - 'instances/{0}/instances/metadata/location'.format(instance_name))) + f'instances/{instance_name}/instances/metadata/location')) self._set_data_entry(instance_name, 'state', self._get_data_entry( - 'instances/{0}/instances/metadata/config/volatile.last_state.power'.format(instance_name))) + f'instances/{instance_name}/instances/metadata/config/volatile.last_state.power')) self._set_data_entry(instance_name, 'type', self._get_data_entry( - 'instances/{0}/instances/metadata/type'.format(instance_name))) + f'instances/{instance_name}/instances/metadata/type')) self._set_data_entry(instance_name, 'network_interfaces', self.extract_network_information_from_instance_config(instance_name)) self._set_data_entry(instance_name, 'preferred_interface', self.get_prefered_instance_network_interface(instance_name)) self._set_data_entry(instance_name, 'vlan_ids', self.get_instance_vlans(instance_name)) self._set_data_entry(instance_name, 'project', self._get_data_entry( - 'instances/{0}/instances/metadata/project'.format(instance_name))) + f'instances/{instance_name}/instances/metadata/project')) def build_inventory_network(self, instance_name): """Add the network interfaces of the instance to the inventory @@ 
-637,18 +652,18 @@ class InventoryModule(BaseInventoryPlugin): None Returns: dict(interface_name: ip)""" - prefered_interface = self._get_data_entry('inventory/{0}/preferred_interface'.format(instance_name)) # name or None + prefered_interface = self._get_data_entry(f'inventory/{instance_name}/preferred_interface') # name or None prefered_instance_network_family = self.prefered_instance_network_family ip_address = '' if prefered_interface: - interface = self._get_data_entry('inventory/{0}/network_interfaces/{1}'.format(instance_name, prefered_interface)) + interface = self._get_data_entry(f'inventory/{instance_name}/network_interfaces/{prefered_interface}') for config in interface: if config['family'] == prefered_instance_network_family: ip_address = config['address'] break else: - interfaces = self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name)) + interfaces = self._get_data_entry(f'inventory/{instance_name}/network_interfaces') for interface in interfaces.values(): for config in interface: if config['family'] == prefered_instance_network_family: @@ -656,9 +671,9 @@ class InventoryModule(BaseInventoryPlugin): break return ip_address - if self._get_data_entry('inventory/{0}/network_interfaces'.format(instance_name)): # instance have network interfaces + if self._get_data_entry(f'inventory/{instance_name}/network_interfaces'): # instance have network interfaces self.inventory.set_variable(instance_name, 'ansible_connection', 'ssh') - self.inventory.set_variable(instance_name, 'ansible_host', interface_selection(instance_name)) + self.inventory.set_variable(instance_name, 'ansible_host', make_unsafe(interface_selection(instance_name))) else: self.inventory.set_variable(instance_name, 'ansible_connection', 'local') @@ -677,38 +692,46 @@ class InventoryModule(BaseInventoryPlugin): Returns: None""" for instance_name in self.data['inventory']: - instance_state = str(self._get_data_entry('inventory/{0}/state'.format(instance_name)) or 
"STOPPED").lower() + instance_state = str(self._get_data_entry(f'inventory/{instance_name}/state') or "STOPPED").lower() # Only consider instances that match the "state" filter, if self.state is not None if self.filter: if self.filter.lower() != instance_state: continue # add instance + instance_name = make_unsafe(instance_name) self.inventory.add_host(instance_name) - # add network informations + # add network information self.build_inventory_network(instance_name) # add os - v = self._get_data_entry('inventory/{0}/os'.format(instance_name)) + v = self._get_data_entry(f'inventory/{instance_name}/os') if v: - self.inventory.set_variable(instance_name, 'ansible_lxd_os', v.lower()) + self.inventory.set_variable(instance_name, 'ansible_lxd_os', make_unsafe(v.lower())) # add release - v = self._get_data_entry('inventory/{0}/release'.format(instance_name)) + v = self._get_data_entry(f'inventory/{instance_name}/release') if v: - self.inventory.set_variable(instance_name, 'ansible_lxd_release', v.lower()) + self.inventory.set_variable( + instance_name, 'ansible_lxd_release', make_unsafe(v.lower())) # add profile - self.inventory.set_variable(instance_name, 'ansible_lxd_profile', self._get_data_entry('inventory/{0}/profile'.format(instance_name))) + self.inventory.set_variable( + instance_name, 'ansible_lxd_profile', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/profile'))) # add state - self.inventory.set_variable(instance_name, 'ansible_lxd_state', instance_state) + self.inventory.set_variable( + instance_name, 'ansible_lxd_state', make_unsafe(instance_state)) # add type - self.inventory.set_variable(instance_name, 'ansible_lxd_type', self._get_data_entry('inventory/{0}/type'.format(instance_name))) + self.inventory.set_variable( + instance_name, 'ansible_lxd_type', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/type'))) # add location information - if self._get_data_entry('inventory/{0}/location'.format(instance_name)) != "none": # wrong 
type by lxd 'none' != 'None' - self.inventory.set_variable(instance_name, 'ansible_lxd_location', self._get_data_entry('inventory/{0}/location'.format(instance_name))) + if self._get_data_entry(f'inventory/{instance_name}/location') != "none": # wrong type by lxd 'none' != 'None' + self.inventory.set_variable( + instance_name, 'ansible_lxd_location', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/location'))) # add VLAN_ID information - if self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name)): - self.inventory.set_variable(instance_name, 'ansible_lxd_vlan_ids', self._get_data_entry('inventory/{0}/vlan_ids'.format(instance_name))) + if self._get_data_entry(f'inventory/{instance_name}/vlan_ids'): + self.inventory.set_variable( + instance_name, 'ansible_lxd_vlan_ids', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/vlan_ids'))) # add project - self.inventory.set_variable(instance_name, 'ansible_lxd_project', self._get_data_entry('inventory/{0}/project'.format(instance_name))) + self.inventory.set_variable( + instance_name, 'ansible_lxd_project', make_unsafe(self._get_data_entry(f'inventory/{instance_name}/project'))) def build_inventory_groups_location(self, group_name): """create group by attribute: location @@ -770,7 +793,7 @@ class InventoryModule(BaseInventoryPlugin): network = ipaddress.ip_network(to_text(self.groupby[group_name].get('attribute'))) except ValueError as err: raise AnsibleParserError( - 'Error while parsing network range {0}: {1}'.format(self.groupby[group_name].get('attribute'), to_native(err))) + f"Error while parsing network range {self.groupby[group_name].get('attribute')}: {err}") for instance_name in self.inventory.hosts: if self.data['inventory'][instance_name].get('network_interfaces') is not None: @@ -975,13 +998,13 @@ class InventoryModule(BaseInventoryPlugin): elif self.groupby[group_name].get('type') == 'project': self.build_inventory_groups_project(group_name) else: - raise 
AnsibleParserError('Unknown group type: {0}'.format(to_native(group_name))) + raise AnsibleParserError(f'Unknown group type: {to_native(group_name)}') if self.groupby: for group_name in self.groupby: if not group_name.isalnum(): - raise AnsibleParserError('Invalid character(s) in groupname: {0}'.format(to_native(group_name))) - group_type(group_name) + raise AnsibleParserError(f'Invalid character(s) in groupname: {to_native(group_name)}') + group_type(make_unsafe(group_name)) def build_inventory(self): """Build dynamic inventory @@ -1017,7 +1040,7 @@ class InventoryModule(BaseInventoryPlugin): None""" iter_keys = list(self.data['instances'].keys()) for instance_name in iter_keys: - if self._get_data_entry('instances/{0}/instances/metadata/type'.format(instance_name)) != self.type_filter: + if self._get_data_entry(f'instances/{instance_name}/instances/metadata/type') != self.type_filter: del self.data['instances'][instance_name] def _populate(self): @@ -1070,9 +1093,7 @@ class InventoryModule(BaseInventoryPlugin): Returns: None""" if IPADDRESS_IMPORT_ERROR: - raise_from( - AnsibleError('another_library must be installed to use this plugin'), - IPADDRESS_IMPORT_ERROR) + raise AnsibleError('another_library must be installed to use this plugin') from IPADDRESS_IMPORT_ERROR super(InventoryModule, self).parse(inventory, loader, path, cache=False) # Read the inventory YAML file @@ -1080,6 +1101,8 @@ class InventoryModule(BaseInventoryPlugin): try: self.client_key = self.get_option('client_key') self.client_cert = self.get_option('client_cert') + self.server_cert = self.get_option('server_cert') + self.server_check_hostname = self.get_option('server_check_hostname') self.project = self.get_option('project') self.debug = self.DEBUG self.data = {} # store for inventory-data @@ -1096,6 +1119,6 @@ class InventoryModule(BaseInventoryPlugin): self.url = self.get_option('url') except Exception as err: raise AnsibleParserError( - 'All correct options required: 
{0}'.format(to_native(err))) + f'All correct options required: {err}') # Call our internal helper to populate the dynamic inventory self._populate() diff --git a/plugins/inventory/nmap.py b/plugins/inventory/nmap.py index f0fa50e3b3..ea0ce560fd 100644 --- a/plugins/inventory/nmap.py +++ b/plugins/inventory/nmap.py @@ -1,87 +1,127 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: nmap - short_description: Uses nmap to find hosts to target +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: nmap +short_description: Uses nmap to find hosts to target +description: + - Uses a YAML configuration file with a valid YAML extension. +extends_documentation_fragment: + - constructed + - inventory_cache +requirements: + - nmap CLI installed +options: + plugin: + description: Token that ensures this is a source file for the P(community.general.nmap#inventory) plugin. + type: string + required: true + choices: ['nmap', 'community.general.nmap'] + sudo: + description: Set to V(true) to execute a C(sudo nmap) plugin scan. + version_added: 4.8.0 + default: false + type: boolean + address: + description: Network IP or range of IPs to scan, you can use a simple range (10.2.2.15-25) or CIDR notation. + type: string + required: true + env: + - name: ANSIBLE_NMAP_ADDRESS + version_added: 6.6.0 + exclude: description: - - Uses a YAML configuration file with a valid YAML extension. - extends_documentation_fragment: - - constructed - - inventory_cache - requirements: - - nmap CLI installed - options: - plugin: - description: token that ensures this is a source file for the 'nmap' plugin. 
- required: true - choices: ['nmap', 'community.general.nmap'] - sudo: - description: Set to C(true) to execute a C(sudo nmap) plugin scan. - version_added: 4.8.0 - default: false - type: boolean - address: - description: Network IP or range of IPs to scan, you can use a simple range (10.2.2.15-25) or CIDR notation. - required: true - exclude: - description: list of addresses to exclude - type: list - elements: string - ports: - description: Enable/disable scanning for open ports - type: boolean - default: true - ipv4: - description: use IPv4 type addresses - type: boolean - default: true - ipv6: - description: use IPv6 type addresses - type: boolean - default: true - udp_scan: - description: - - Scan via UDP. - - Depending on your system you might need I(sudo=true) for this to work. - type: boolean - default: false - version_added: 6.1.0 - icmp_timestamp: - description: - - Scan via ICMP Timestamp (C(-PP)). - - Depending on your system you might need I(sudo=true) for this to work. - type: boolean - default: false - version_added: 6.1.0 - dns_resolve: - description: Whether to always (C(true)) or never (C(false)) do DNS resolution. - type: boolean - default: false - version_added: 6.1.0 - notes: - - At least one of ipv4 or ipv6 is required to be True, both can be True, but they cannot both be False. - - 'TODO: add OS fingerprinting' -''' -EXAMPLES = ''' + - List of addresses to exclude. + - For example V(10.2.2.15-25) or V(10.2.2.15,10.2.2.16). + type: list + elements: string + env: + - name: ANSIBLE_NMAP_EXCLUDE + version_added: 6.6.0 + port: + description: + - Only scan specific port or port range (C(-p)). + - For example, you could pass V(22) for a single port, V(1-65535) for a range of ports, or V(U:53,137,T:21-25,139,8080,S:9) + to check port 53 with UDP, ports 21-25 with TCP, port 9 with SCTP, and ports 137, 139, and 8080 with all. + type: string + version_added: 6.5.0 + ports: + description: Enable/disable scanning ports. 
+ type: boolean + default: true + ipv4: + description: Use IPv4 type addresses. + type: boolean + default: true + ipv6: + description: Use IPv6 type addresses. + type: boolean + default: true + udp_scan: + description: + - Scan using UDP. + - Depending on your system you might need O(sudo=true) for this to work. + type: boolean + default: false + version_added: 6.1.0 + icmp_timestamp: + description: + - Scan using ICMP Timestamp (C(-PP)). + - Depending on your system you might need O(sudo=true) for this to work. + type: boolean + default: false + version_added: 6.1.0 + open: + description: Only scan for open (or possibly open) ports. + type: boolean + default: false + version_added: 6.5.0 + dns_resolve: + description: Whether to always (V(true)) or never (V(false)) do DNS resolution. + type: boolean + default: false + version_added: 6.1.0 + dns_servers: + description: Specify which DNS servers to use for name resolution. + type: list + elements: string + version_added: 10.5.0 + use_arp_ping: + description: Whether to always (V(true)) use the quick ARP ping or (V(false)) a slower but more reliable method. + type: boolean + default: true + version_added: 7.4.0 +notes: + - At least one of O(ipv4) or O(ipv6) is required to be V(true); both can be V(true), but they cannot both be V(false). + - 'TODO: add OS fingerprinting.' +""" +EXAMPLES = r""" +--- # inventory.config file in YAML format plugin: community.general.nmap strict: false address: 192.168.0.0/24 - +--- # a sudo nmap scan to fully use nmap scan power. 
plugin: community.general.nmap sudo: true strict: false address: 192.168.0.0/24 -''' + +--- +# an nmap scan specifying ports and classifying results to an inventory group +plugin: community.general.nmap +address: 192.168.0.0/24 +exclude: 192.168.0.1, web.example.com +port: 22, 443 +groups: + web_servers: "ports | selectattr('port', 'equalto', '443')" +""" import os import re @@ -94,6 +134,8 @@ from ansible.module_utils.common.text.converters import to_native, to_text from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable from ansible.module_utils.common.process import get_bin_path +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe + class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): @@ -110,6 +152,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): strict = self.get_option('strict') for host in hosts: + host = make_unsafe(host) hostname = host['name'] self.inventory.add_host(hostname) for var, value in host.items(): @@ -140,7 +183,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): try: self._nmap = get_bin_path('nmap') except ValueError as e: - raise AnsibleParserError('nmap inventory plugin requires the nmap cli tool to work: {0}'.format(to_native(e))) + raise AnsibleParserError(f'nmap inventory plugin requires the nmap cli tool to work: {e}') super(InventoryModule, self).parse(inventory, loader, path, cache=cache) @@ -168,39 +211,53 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): # setup command cmd = [self._nmap] - if self._options['sudo']: + if self.get_option('sudo'): cmd.insert(0, 'sudo') - if not self._options['ports']: + if self.get_option('port'): + cmd.append('-p') + cmd.append(self.get_option('port')) + + if not self.get_option('ports'): cmd.append('-sP') - if self._options['ipv4'] and not self._options['ipv6']: + if self.get_option('ipv4') and not self.get_option('ipv6'): cmd.append('-4') - elif 
self._options['ipv6'] and not self._options['ipv4']: + elif self.get_option('ipv6') and not self.get_option('ipv4'): cmd.append('-6') - elif not self._options['ipv6'] and not self._options['ipv4']: + elif not self.get_option('ipv6') and not self.get_option('ipv4'): raise AnsibleParserError('One of ipv4 or ipv6 must be enabled for this plugin') - if self._options['exclude']: + if self.get_option('exclude'): cmd.append('--exclude') - cmd.append(','.join(self._options['exclude'])) + cmd.append(','.join(self.get_option('exclude'))) - if self._options['dns_resolve']: + if self.get_option('dns_resolve'): cmd.append('-n') - if self._options['udp_scan']: + if self.get_option('dns_servers'): + cmd.append('--dns-servers') + cmd.append(','.join(self.get_option('dns_servers'))) + + if self.get_option('udp_scan'): cmd.append('-sU') - if self._options['icmp_timestamp']: + if self.get_option('icmp_timestamp'): cmd.append('-PP') - cmd.append(self._options['address']) + if self.get_option('open'): + cmd.append('--open') + + if not self.get_option('use_arp_ping'): + cmd.append('--disable-arp-ping') + + cmd.append(self.get_option('address')) try: # execute p = Popen(cmd, stdout=PIPE, stderr=PIPE) stdout, stderr = p.communicate() if p.returncode != 0: - raise AnsibleParserError('Failed to run nmap, rc=%s: %s' % (p.returncode, to_native(stderr))) + raise AnsibleParserError(f'Failed to run nmap, rc={p.returncode}: {to_native(stderr)}') # parse results host = None @@ -211,7 +268,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): try: t_stdout = to_text(stdout, errors='surrogate_or_strict') except UnicodeError as e: - raise AnsibleParserError('Invalid (non unicode) input returned: %s' % to_native(e)) + raise AnsibleParserError(f'Invalid (non unicode) input returned: {e}') for line in t_stdout.splitlines(): hits = self.find_host.match(line) @@ -252,7 +309,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): results[-1]['ports'] = ports except 
Exception as e: - raise AnsibleParserError("failed to parse %s: %s " % (to_native(path), to_native(e))) + raise AnsibleParserError(f"failed to parse {to_native(path)}: {e} ") if cache_needs_update: self._cache[cache_key] = results diff --git a/plugins/inventory/online.py b/plugins/inventory/online.py index 261548d8a2..cbc46a6723 100644 --- a/plugins/inventory/online.py +++ b/plugins/inventory/online.py @@ -1,52 +1,52 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' - name: online - author: - - Remy Leone (@remyleone) - short_description: Scaleway (previously Online SAS or Online.net) inventory source - description: - - Get inventory hosts from Scaleway (previously Online SAS or Online.net). - options: - plugin: - description: token that ensures this is a source file for the 'online' plugin. - required: true - choices: ['online', 'community.general.online'] - oauth_token: - required: true - description: Online OAuth token. - env: - # in order of precedence - - name: ONLINE_TOKEN - - name: ONLINE_API_KEY - - name: ONLINE_OAUTH_TOKEN - hostnames: - description: List of preference about what to use as an hostname. - type: list - elements: string - default: - - public_ipv4 - choices: - - public_ipv4 - - private_ipv4 - - hostname - groups: - description: List of groups. - type: list - elements: string - choices: - - location - - offer - - rpn -''' +DOCUMENTATION = r""" +name: online +author: + - Remy Leone (@remyleone) +short_description: Scaleway (previously Online SAS or Online.net) inventory source +description: + - Get inventory hosts from Scaleway (previously Online SAS or Online.net). 
+options: + plugin: + description: Token that ensures this is a source file for the P(community.general.online#inventory) plugin. + type: string + required: true + choices: ['online', 'community.general.online'] + oauth_token: + required: true + description: Online OAuth token. + type: string + env: + # in order of precedence + - name: ONLINE_TOKEN + - name: ONLINE_API_KEY + - name: ONLINE_OAUTH_TOKEN + hostnames: + description: List of preference about what to use as an hostname. + type: list + elements: string + default: + - public_ipv4 + choices: + - public_ipv4 + - private_ipv4 + - hostname + groups: + description: List of groups. + type: list + elements: string + choices: + - location + - offer + - rpn +""" -EXAMPLES = r''' +EXAMPLES = r""" # online_inventory.yml file in YAML format # Example command line: ansible-inventory --list -i online_inventory.yml @@ -57,17 +57,19 @@ groups: - location - offer - rpn -''' +""" import json from sys import version as python_version +from urllib.parse import urljoin from ansible.errors import AnsibleError from ansible.module_utils.urls import open_url from ansible.plugins.inventory import BaseInventoryPlugin -from ansible.module_utils.common.text.converters import to_native, to_text +from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.ansible_release import __version__ as ansible_version -from ansible.module_utils.six.moves.urllib.parse import urljoin + +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe class InventoryModule(BaseInventoryPlugin): @@ -134,7 +136,7 @@ class InventoryModule(BaseInventoryPlugin): try: response = open_url(url, headers=self.headers) except Exception as e: - self.display.warning("An error happened while fetching: %s" % url) + self.display.warning(f"An error happened while fetching: {url}") return None try: @@ -169,20 +171,20 @@ class InventoryModule(BaseInventoryPlugin): "support" ) for attribute in targeted_attributes: 
- self.inventory.set_variable(hostname, attribute, host_infos[attribute]) + self.inventory.set_variable(hostname, attribute, make_unsafe(host_infos[attribute])) if self.extract_public_ipv4(host_infos=host_infos): - self.inventory.set_variable(hostname, "public_ipv4", self.extract_public_ipv4(host_infos=host_infos)) - self.inventory.set_variable(hostname, "ansible_host", self.extract_public_ipv4(host_infos=host_infos)) + self.inventory.set_variable(hostname, "public_ipv4", make_unsafe(self.extract_public_ipv4(host_infos=host_infos))) + self.inventory.set_variable(hostname, "ansible_host", make_unsafe(self.extract_public_ipv4(host_infos=host_infos))) if self.extract_private_ipv4(host_infos=host_infos): - self.inventory.set_variable(hostname, "public_ipv4", self.extract_private_ipv4(host_infos=host_infos)) + self.inventory.set_variable(hostname, "public_ipv4", make_unsafe(self.extract_private_ipv4(host_infos=host_infos))) if self.extract_os_name(host_infos=host_infos): - self.inventory.set_variable(hostname, "os_name", self.extract_os_name(host_infos=host_infos)) + self.inventory.set_variable(hostname, "os_name", make_unsafe(self.extract_os_name(host_infos=host_infos))) if self.extract_os_version(host_infos=host_infos): - self.inventory.set_variable(hostname, "os_version", self.extract_os_name(host_infos=host_infos)) + self.inventory.set_variable(hostname, "os_version", make_unsafe(self.extract_os_name(host_infos=host_infos))) def _filter_host(self, host_infos, hostname_preferences): @@ -201,6 +203,8 @@ class InventoryModule(BaseInventoryPlugin): if not hostname: return + hostname = make_unsafe(hostname) + self.inventory.add_host(host=hostname) self._fill_host_variables(hostname=hostname, host_infos=host_infos) @@ -210,6 +214,8 @@ class InventoryModule(BaseInventoryPlugin): if not group: return + group = make_unsafe(group) + self.inventory.add_group(group=group) self.inventory.add_host(group=group, host=hostname) @@ -237,8 +243,8 @@ class 
InventoryModule(BaseInventoryPlugin): } self.headers = { - 'Authorization': "Bearer %s" % token, - 'User-Agent': "ansible %s Python %s" % (ansible_version, python_version.split(' ', 1)[0]), + 'Authorization': f"Bearer {token}", + 'User-Agent': f"ansible {ansible_version} Python {python_version.split(' ', 1)[0]}", 'Content-type': 'application/json' } diff --git a/plugins/inventory/opennebula.py b/plugins/inventory/opennebula.py index 603920edc2..26f7a21d88 100644 --- a/plugins/inventory/opennebula.py +++ b/plugins/inventory/opennebula.py @@ -1,83 +1,79 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2020, FELDSAM s.r.o. - FeldHost™ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = r''' - name: opennebula - author: - - Kristian Feldsam (@feldsam) - short_description: OpenNebula inventory source - version_added: "3.8.0" - extends_documentation_fragment: - - constructed +DOCUMENTATION = r""" +name: opennebula +author: + - Kristian Feldsam (@feldsam) +short_description: OpenNebula inventory source +version_added: "3.8.0" +extends_documentation_fragment: + - constructed +description: + - Get inventory hosts from OpenNebula cloud. + - Uses an YAML configuration file ending with either C(opennebula.yml) or C(opennebula.yaml) to set parameter values. + - Uses O(api_authfile), C(~/.one/one_auth), or E(ONE_AUTH) pointing to a OpenNebula credentials file. +options: + plugin: + description: Token that ensures this is a source file for the 'opennebula' plugin. + type: string + required: true + choices: [community.general.opennebula] + api_url: description: - - Get inventory hosts from OpenNebula cloud. - - Uses an YAML configuration file ending with either I(opennebula.yml) or I(opennebula.yaml) - to set parameter values. 
- - Uses I(api_authfile), C(~/.one/one_auth), or C(ONE_AUTH) pointing to a OpenNebula credentials file. - options: - plugin: - description: Token that ensures this is a source file for the 'opennebula' plugin. - type: string - required: true - choices: [ community.general.opennebula ] - api_url: - description: - - URL of the OpenNebula RPC server. - - It is recommended to use HTTPS so that the username/password are not - transferred over the network unencrypted. - - If not set then the value of the C(ONE_URL) environment variable is used. - env: - - name: ONE_URL - required: true - type: string - api_username: - description: - - Name of the user to login into the OpenNebula RPC server. If not set - then the value of the C(ONE_USERNAME) environment variable is used. - env: - - name: ONE_USERNAME - type: string - api_password: - description: - - Password or a token of the user to login into OpenNebula RPC server. - - If not set, the value of the C(ONE_PASSWORD) environment variable is used. - env: - - name: ONE_PASSWORD - required: false - type: string - api_authfile: - description: - - If both I(api_username) or I(api_password) are not set, then it will try - authenticate with ONE auth file. Default path is C(~/.one/one_auth). - - Set environment variable C(ONE_AUTH) to override this path. - env: - - name: ONE_AUTH - required: false - type: string - hostname: - description: Field to match the hostname. Note C(v4_first_ip) corresponds to the first IPv4 found on VM. - type: string - default: v4_first_ip - choices: - - v4_first_ip - - v6_first_ip - - name - filter_by_label: - description: Only return servers filtered by this label. - type: string - group_by_labels: - description: Create host groups by vm labels - type: bool - default: true -''' + - URL of the OpenNebula RPC server. + - It is recommended to use HTTPS so that the username/password are not transferred over the network unencrypted. 
+ - If not set then the value of the E(ONE_URL) environment variable is used. + env: + - name: ONE_URL + required: true + type: string + api_username: + description: + - Name of the user to login into the OpenNebula RPC server. If not set then the value of the E(ONE_USERNAME) environment + variable is used. + env: + - name: ONE_USERNAME + type: string + api_password: + description: + - Password or a token of the user to login into OpenNebula RPC server. + - If not set, the value of the E(ONE_PASSWORD) environment variable is used. + env: + - name: ONE_PASSWORD + required: false + type: string + api_authfile: + description: + - If both O(api_username) or O(api_password) are not set, then it tries to authenticate with ONE auth file. Default + path is C(~/.one/one_auth). + - Set environment variable E(ONE_AUTH) to override this path. + env: + - name: ONE_AUTH + required: false + type: string + hostname: + description: Field to match the hostname. Note V(v4_first_ip) corresponds to the first IPv4 found on VM. + type: string + default: v4_first_ip + choices: + - v4_first_ip + - v6_first_ip + - name + filter_by_label: + description: Only return servers filtered by this label. + type: string + group_by_labels: + description: Create host groups by VM labels. 
+ type: bool + default: true +""" -EXAMPLES = r''' +EXAMPLES = r""" # inventory_opennebula.yml file in YAML format # Example command line: ansible-inventory --list -i inventory_opennebula.yml @@ -85,7 +81,7 @@ EXAMPLES = r''' plugin: community.general.opennebula api_url: https://opennebula:2633/RPC2 filter_by_label: Cache -''' +""" try: import pyone @@ -96,7 +92,8 @@ except ImportError: from ansible.errors import AnsibleError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable -from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe from collections import namedtuple import os @@ -126,9 +123,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable): authstring = fp.read().rstrip() username, password = authstring.split(":") except (OSError, IOError): - raise AnsibleError("Could not find or read ONE_AUTH file at '{e}'".format(e=authfile)) + raise AnsibleError(f"Could not find or read ONE_AUTH file at '{authfile}'") except Exception: - raise AnsibleError("Error occurs when reading ONE_AUTH file at '{e}'".format(e=authfile)) + raise AnsibleError(f"Error occurs when reading ONE_AUTH file at '{authfile}'") auth_params = namedtuple('auth', ('url', 'username', 'password')) @@ -141,7 +138,8 @@ class InventoryModule(BaseInventoryPlugin, Constructable): nic = [nic] for net in nic: - return net['IP'] + if net.get('IP'): + return net['IP'] return False @@ -163,13 +161,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable): if not (auth.username and auth.password): raise AnsibleError('API Credentials missing. 
Check OpenNebula inventory file.') else: - one_client = pyone.OneServer(auth.url, session=auth.username + ':' + auth.password) + one_client = pyone.OneServer(auth.url, session=f"{auth.username}:{auth.password}") # get hosts (VMs) try: vm_pool = one_client.vmpool.infoextended(-2, -1, -1, 3) except Exception as e: - raise AnsibleError("Something happened during XML-RPC call: {e}".format(e=to_native(e))) + raise AnsibleError(f"Something happened during XML-RPC call: {e}") return vm_pool @@ -196,6 +194,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable): continue server['name'] = vm.NAME + server['id'] = vm.ID + if hasattr(vm.HISTORY_RECORDS, 'HISTORY') and vm.HISTORY_RECORDS.HISTORY: + server['host'] = vm.HISTORY_RECORDS.HISTORY[-1].HOSTNAME server['LABELS'] = labels server['v4_first_ip'] = self._get_vm_ipv4(vm) server['v6_first_ip'] = self._get_vm_ipv6(vm) @@ -215,6 +216,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): filter_by_label = self.get_option('filter_by_label') servers = self._retrieve_servers(filter_by_label) for server in servers: + server = make_unsafe(server) hostname = server['name'] # check for labels if group_by_labels and server['LABELS']: diff --git a/plugins/inventory/proxmox.py b/plugins/inventory/proxmox.py deleted file mode 100644 index e33f7ed77d..0000000000 --- a/plugins/inventory/proxmox.py +++ /dev/null @@ -1,639 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2016 Guido Günther , Daniel Lobato Garcia -# Copyright (c) 2018 Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - -DOCUMENTATION = ''' - name: proxmox - short_description: Proxmox inventory source - version_added: "1.2.0" - author: - - Jeffrey van Pelt (@Thulium-Drake) - requirements: - - requests >= 1.1 - description: - - Get inventory 
hosts from a Proxmox PVE cluster. - - "Uses a configuration file as an inventory source, it must end in C(.proxmox.yml) or C(.proxmox.yaml)" - - Will retrieve the first network interface with an IP for Proxmox nodes. - - Can retrieve LXC/QEMU configuration as facts. - extends_documentation_fragment: - - constructed - - inventory_cache - options: - plugin: - description: The name of this plugin, it should always be set to C(community.general.proxmox) for this plugin to recognize it as it's own. - required: true - choices: ['community.general.proxmox'] - type: str - url: - description: - - URL to Proxmox cluster. - - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_URL) will be used instead. - - Since community.general 4.7.0 you can also use templating to specify the value of the I(url). - default: 'http://localhost:8006' - type: str - env: - - name: PROXMOX_URL - version_added: 2.0.0 - user: - description: - - Proxmox authentication user. - - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_USER) will be used instead. - - Since community.general 4.7.0 you can also use templating to specify the value of the I(user). - required: true - type: str - env: - - name: PROXMOX_USER - version_added: 2.0.0 - password: - description: - - Proxmox authentication password. - - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_PASSWORD) will be used instead. - - Since community.general 4.7.0 you can also use templating to specify the value of the I(password). - - If you do not specify a password, you must set I(token_id) and I(token_secret) instead. - type: str - env: - - name: PROXMOX_PASSWORD - version_added: 2.0.0 - token_id: - description: - - Proxmox authentication token ID. 
- - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_TOKEN_ID) will be used instead. - - To use token authentication, you must also specify I(token_secret). If you do not specify I(token_id) and I(token_secret), - you must set a password instead. - - Make sure to grant explicit pve permissions to the token or disable 'privilege separation' to use the users' privileges instead. - version_added: 4.8.0 - type: str - env: - - name: PROXMOX_TOKEN_ID - token_secret: - description: - - Proxmox authentication token secret. - - If the value is not specified in the inventory configuration, the value of environment variable C(PROXMOX_TOKEN_SECRET) will be used instead. - - To use token authentication, you must also specify I(token_id). If you do not specify I(token_id) and I(token_secret), - you must set a password instead. - version_added: 4.8.0 - type: str - env: - - name: PROXMOX_TOKEN_SECRET - validate_certs: - description: Verify SSL certificate if using HTTPS. - type: boolean - default: true - group_prefix: - description: Prefix to apply to Proxmox groups. - default: proxmox_ - type: str - facts_prefix: - description: Prefix to apply to LXC/QEMU config facts. - default: proxmox_ - type: str - want_facts: - description: - - Gather LXC/QEMU configuration facts. - - When I(want_facts) is set to C(true) more details about QEMU VM status are possible, besides the running and stopped states. - Currently if the VM is running and it is suspended, the status will be running and the machine will be in C(running) group, - but its actual state will be paused. See I(qemu_extended_statuses) for how to retrieve the real status. - default: false - type: bool - qemu_extended_statuses: - description: - - Requires I(want_facts) to be set to C(true) to function. This will allow you to differentiate betweend C(paused) and C(prelaunch) - statuses of the QEMU VMs. 
- - This introduces multiple groups [prefixed with I(group_prefix)] C(prelaunch) and C(paused). - default: false - type: bool - version_added: 5.1.0 - want_proxmox_nodes_ansible_host: - version_added: 3.0.0 - description: - - Whether to set C(ansbile_host) for proxmox nodes. - - When set to C(true) (default), will use the first available interface. This can be different from what you expect. - - The default of this option changed from C(true) to C(false) in community.general 6.0.0. - type: bool - default: false - filters: - version_added: 4.6.0 - description: A list of Jinja templates that allow filtering hosts. - type: list - elements: str - default: [] - strict: - version_added: 2.5.0 - compose: - version_added: 2.5.0 - groups: - version_added: 2.5.0 - keyed_groups: - version_added: 2.5.0 -''' - -EXAMPLES = ''' -# Minimal example which will not gather additional facts for QEMU/LXC guests -# By not specifying a URL the plugin will attempt to connect to the controller host on port 8006 -# my.proxmox.yml -plugin: community.general.proxmox -user: ansible@pve -password: secure -# Note that this can easily give you wrong values as ansible_host. See further below for -# an example where this is set to `false` and where ansible_host is set with `compose`. -want_proxmox_nodes_ansible_host: true - -# Instead of login with password, proxmox supports api token authentication since release 6.2. -plugin: community.general.proxmox -user: ci@pve -token_id: gitlab-1 -token_secret: fa256e9c-26ab-41ec-82da-707a2c079829 - -# The secret can also be a vault string or passed via the environment variable TOKEN_SECRET. 
-token_secret: !vault | - $ANSIBLE_VAULT;1.1;AES256 - 62353634333163633336343265623632626339313032653563653165313262343931643431656138 - 6134333736323265656466646539663134306166666237630a653363623262636663333762316136 - 34616361326263383766366663393837626437316462313332663736623066656237386531663731 - 3037646432383064630a663165303564623338666131353366373630656661333437393937343331 - 32643131386134396336623736393634373936356332623632306561356361323737313663633633 - 6231313333666361656537343562333337323030623732323833 - -# More complete example demonstrating the use of 'want_facts' and the constructed options -# Note that using facts returned by 'want_facts' in constructed options requires 'want_facts=true' -# my.proxmox.yml -plugin: community.general.proxmox -url: http://pve.domain.com:8006 -user: ansible@pve -password: secure -validate_certs: false -want_facts: true -keyed_groups: - # proxmox_tags_parsed is an example of a fact only returned when 'want_facts=true' - - key: proxmox_tags_parsed - separator: "" - prefix: group -groups: - webservers: "'web' in (proxmox_tags_parsed|list)" - mailservers: "'mail' in (proxmox_tags_parsed|list)" -compose: - ansible_port: 2222 -# Note that this can easily give you wrong values as ansible_host. See further below for -# an example where this is set to `false` and where ansible_host is set with `compose`. -want_proxmox_nodes_ansible_host: true - -# Using the inventory to allow ansible to connect via the first IP address of the VM / Container -# (Default is connection by name of QEMU/LXC guests) -# Note: my_inv_var demonstrates how to add a string variable to every host used by the inventory. 
-# my.proxmox.yml -plugin: community.general.proxmox -url: http://pve.domain.com:8006 -user: ansible@pve -password: secure -validate_certs: false -want_facts: true -want_proxmox_nodes_ansible_host: false -compose: - ansible_host: proxmox_ipconfig0.ip | default(proxmox_net0.ip) | ipaddr('address') - my_inv_var_1: "'my_var1_value'" - my_inv_var_2: > - "my_var_2_value" - -# Specify the url, user and password using templating -# my.proxmox.yml -plugin: community.general.proxmox -url: "{{ lookup('ansible.builtin.ini', 'url', section='proxmox', file='file.ini') }}" -user: "{{ lookup('ansible.builtin.env','PM_USER') | default('ansible@pve') }}" -password: "{{ lookup('community.general.random_string', base64=True) }}" -# Note that this can easily give you wrong values as ansible_host. See further up for -# an example where this is set to `false` and where ansible_host is set with `compose`. -want_proxmox_nodes_ansible_host: true - -''' - -import itertools -import re - -from ansible.module_utils.common._collections_compat import MutableMapping - -from ansible.errors import AnsibleError -from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.six import string_types -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.utils.display import Display - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -# 3rd party imports -try: - import requests - if LooseVersion(requests.__version__) < LooseVersion('1.1.0'): - raise ImportError - HAS_REQUESTS = True -except ImportError: - HAS_REQUESTS = False - -display = Display() - - -class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): - ''' Host inventory parser for ansible using Proxmox as source. 
''' - - NAME = 'community.general.proxmox' - - def __init__(self): - - super(InventoryModule, self).__init__() - - # from config - self.proxmox_url = None - - self.session = None - self.cache_key = None - self.use_cache = None - - def verify_file(self, path): - - valid = False - if super(InventoryModule, self).verify_file(path): - if path.endswith(('proxmox.yaml', 'proxmox.yml')): - valid = True - else: - self.display.vvv('Skipping due to inventory source not ending in "proxmox.yaml" nor "proxmox.yml"') - return valid - - def _get_session(self): - if not self.session: - self.session = requests.session() - self.session.verify = self.get_option('validate_certs') - return self.session - - def _get_auth(self): - credentials = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password, }) - - if self.proxmox_password: - - credentials = urlencode({'username': self.proxmox_user, 'password': self.proxmox_password, }) - - a = self._get_session() - ret = a.post('%s/api2/json/access/ticket' % self.proxmox_url, data=credentials) - - json = ret.json() - - self.headers = { - # only required for POST/PUT/DELETE methods, which we are not using currently - # 'CSRFPreventionToken': json['data']['CSRFPreventionToken'], - 'Cookie': 'PVEAuthCookie={0}'.format(json['data']['ticket']) - } - - else: - - self.headers = {'Authorization': 'PVEAPIToken={0}!{1}={2}'.format(self.proxmox_user, self.proxmox_token_id, self.proxmox_token_secret)} - - def _get_json(self, url, ignore_errors=None): - - if not self.use_cache or url not in self._cache.get(self.cache_key, {}): - - if self.cache_key not in self._cache: - self._cache[self.cache_key] = {'url': ''} - - data = [] - s = self._get_session() - while True: - ret = s.get(url, headers=self.headers) - if ignore_errors and ret.status_code in ignore_errors: - break - ret.raise_for_status() - json = ret.json() - - # process results - # FIXME: This assumes 'return type' matches a specific query, - # it will break if we expand the 
queries and they dont have different types - if 'data' not in json: - # /hosts/:id does not have a 'data' key - data = json - break - elif isinstance(json['data'], MutableMapping): - # /facts are returned as dict in 'data' - data = json['data'] - break - else: - # /hosts 's 'results' is a list of all hosts, returned is paginated - data = data + json['data'] - break - - self._cache[self.cache_key][url] = data - - return self._cache[self.cache_key][url] - - def _get_nodes(self): - return self._get_json("%s/api2/json/nodes" % self.proxmox_url) - - def _get_pools(self): - return self._get_json("%s/api2/json/pools" % self.proxmox_url) - - def _get_lxc_per_node(self, node): - return self._get_json("%s/api2/json/nodes/%s/lxc" % (self.proxmox_url, node)) - - def _get_qemu_per_node(self, node): - return self._get_json("%s/api2/json/nodes/%s/qemu" % (self.proxmox_url, node)) - - def _get_members_per_pool(self, pool): - ret = self._get_json("%s/api2/json/pools/%s" % (self.proxmox_url, pool)) - return ret['members'] - - def _get_node_ip(self, node): - ret = self._get_json("%s/api2/json/nodes/%s/network" % (self.proxmox_url, node)) - - for iface in ret: - try: - return iface['address'] - except Exception: - return None - - def _get_agent_network_interfaces(self, node, vmid, vmtype): - result = [] - - try: - ifaces = self._get_json( - "%s/api2/json/nodes/%s/%s/%s/agent/network-get-interfaces" % ( - self.proxmox_url, node, vmtype, vmid - ) - )['result'] - - if "error" in ifaces: - if "class" in ifaces["error"]: - # This happens on Windows, even though qemu agent is running, the IP address - # cannot be fetched, as it's unsupported, also a command disabled can happen. 
- errorClass = ifaces["error"]["class"] - if errorClass in ["Unsupported"]: - self.display.v("Retrieving network interfaces from guest agents on windows with older qemu-guest-agents is not supported") - elif errorClass in ["CommandDisabled"]: - self.display.v("Retrieving network interfaces from guest agents has been disabled") - return result - - for iface in ifaces: - result.append({ - 'name': iface['name'], - 'mac-address': iface['hardware-address'] if 'hardware-address' in iface else '', - 'ip-addresses': ["%s/%s" % (ip['ip-address'], ip['prefix']) for ip in iface['ip-addresses']] if 'ip-addresses' in iface else [] - }) - except requests.HTTPError: - pass - - return result - - def _get_vm_config(self, properties, node, vmid, vmtype, name): - ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/config" % (self.proxmox_url, node, vmtype, vmid)) - - properties[self._fact('node')] = node - properties[self._fact('vmid')] = vmid - properties[self._fact('vmtype')] = vmtype - - plaintext_configs = [ - 'description', - ] - - for config in ret: - key = self._fact(config) - value = ret[config] - try: - # fixup disk images as they have no key - if config == 'rootfs' or config.startswith(('virtio', 'sata', 'ide', 'scsi')): - value = ('disk_image=' + value) - - # Additional field containing parsed tags as list - if config == 'tags': - stripped_value = value.strip() - if stripped_value: - parsed_key = key + "_parsed" - properties[parsed_key] = [tag.strip() for tag in stripped_value.replace(',', ';').split(";")] - - # The first field in the agent string tells you whether the agent is enabled - # the rest of the comma separated string is extra config for the agent. - # In some (newer versions of proxmox) instances it can be 'enabled=1'. 
- if config == 'agent': - agent_enabled = 0 - try: - agent_enabled = int(value.split(',')[0]) - except ValueError: - if value.split(',')[0] == "enabled=1": - agent_enabled = 1 - if agent_enabled: - agent_iface_value = self._get_agent_network_interfaces(node, vmid, vmtype) - if agent_iface_value: - agent_iface_key = self.to_safe('%s%s' % (key, "_interfaces")) - properties[agent_iface_key] = agent_iface_value - - if config == 'lxc': - out_val = {} - for k, v in value: - if k.startswith('lxc.'): - k = k[len('lxc.'):] - out_val[k] = v - value = out_val - - if config not in plaintext_configs and isinstance(value, string_types) \ - and all("=" in v for v in value.split(",")): - # split off strings with commas to a dict - # skip over any keys that cannot be processed - try: - value = dict(key.split("=", 1) for key in value.split(",")) - except Exception: - continue - - properties[key] = value - except NameError: - return None - - def _get_vm_status(self, properties, node, vmid, vmtype, name): - ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/status/current" % (self.proxmox_url, node, vmtype, vmid)) - properties[self._fact('status')] = ret['status'] - if vmtype == 'qemu': - properties[self._fact('qmpstatus')] = ret['qmpstatus'] - - def _get_vm_snapshots(self, properties, node, vmid, vmtype, name): - ret = self._get_json("%s/api2/json/nodes/%s/%s/%s/snapshot" % (self.proxmox_url, node, vmtype, vmid)) - snapshots = [snapshot['name'] for snapshot in ret if snapshot['name'] != 'current'] - properties[self._fact('snapshots')] = snapshots - - def to_safe(self, word): - '''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups - #> ProxmoxInventory.to_safe("foo-bar baz") - 'foo_barbaz' - ''' - regex = r"[^A-Za-z0-9\_]" - return re.sub(regex, "_", word.replace(" ", "")) - - def _fact(self, name): - '''Generate a fact's full name from the common prefix and a name.''' - return self.to_safe('%s%s' % (self.facts_prefix, name.lower())) - - def 
_group(self, name): - '''Generate a group's full name from the common prefix and a name.''' - return self.to_safe('%s%s' % (self.group_prefix, name.lower())) - - def _can_add_host(self, name, properties): - '''Ensure that a host satisfies all defined hosts filters. If strict mode is - enabled, any error during host filter compositing will lead to an AnsibleError - being raised, otherwise the filter will be ignored. - ''' - for host_filter in self.host_filters: - try: - if not self._compose(host_filter, properties): - return False - except Exception as e: # pylint: disable=broad-except - message = "Could not evaluate host filter %s for host %s - %s" % (host_filter, name, to_native(e)) - if self.strict: - raise AnsibleError(message) - display.warning(message) - return True - - def _add_host(self, name, variables): - self.inventory.add_host(name) - for k, v in variables.items(): - self.inventory.set_variable(name, k, v) - variables = self.inventory.get_host(name).get_vars() - self._set_composite_vars(self.get_option('compose'), variables, name, strict=self.strict) - self._add_host_to_composed_groups(self.get_option('groups'), variables, name, strict=self.strict) - self._add_host_to_keyed_groups(self.get_option('keyed_groups'), variables, name, strict=self.strict) - - def _handle_item(self, node, ittype, item): - '''Handle an item from the list of LXC containers and Qemu VM. 
The - return value will be either None if the item was skipped or the name of - the item if it was added to the inventory.''' - if item.get('template'): - return None - - properties = dict() - name, vmid = item['name'], item['vmid'] - - # get status, config and snapshots if want_facts == True - want_facts = self.get_option('want_facts') - if want_facts: - self._get_vm_status(properties, node, vmid, ittype, name) - self._get_vm_config(properties, node, vmid, ittype, name) - self._get_vm_snapshots(properties, node, vmid, ittype, name) - - # ensure the host satisfies filters - if not self._can_add_host(name, properties): - return None - - # add the host to the inventory - self._add_host(name, properties) - node_type_group = self._group('%s_%s' % (node, ittype)) - self.inventory.add_child(self._group('all_' + ittype), name) - self.inventory.add_child(node_type_group, name) - - item_status = item['status'] - if item_status == 'running': - if want_facts and ittype == 'qemu' and self.get_option('qemu_extended_statuses'): - # get more details about the status of the qemu VM - item_status = properties.get(self._fact('qmpstatus'), item_status) - self.inventory.add_child(self._group('all_%s' % (item_status, )), name) - - return name - - def _populate_pool_groups(self, added_hosts): - '''Generate groups from Proxmox resource pools, ignoring VMs and - containers that were skipped.''' - for pool in self._get_pools(): - poolid = pool.get('poolid') - if not poolid: - continue - pool_group = self._group('pool_' + poolid) - self.inventory.add_group(pool_group) - - for member in self._get_members_per_pool(poolid): - name = member.get('name') - if name and name in added_hosts: - self.inventory.add_child(pool_group, name) - - def _populate(self): - - # create common groups - default_groups = ['lxc', 'qemu', 'running', 'stopped'] - - if self.get_option('qemu_extended_statuses'): - default_groups.extend(['prelaunch', 'paused']) - - for group in default_groups: - 
self.inventory.add_group(self._group('all_%s' % (group))) - - nodes_group = self._group('nodes') - self.inventory.add_group(nodes_group) - - want_proxmox_nodes_ansible_host = self.get_option("want_proxmox_nodes_ansible_host") - - # gather vm's on nodes - self._get_auth() - hosts = [] - for node in self._get_nodes(): - if not node.get('node'): - continue - - self.inventory.add_host(node['node']) - if node['type'] == 'node': - self.inventory.add_child(nodes_group, node['node']) - - if node['status'] == 'offline': - continue - - # get node IP address - if want_proxmox_nodes_ansible_host: - ip = self._get_node_ip(node['node']) - self.inventory.set_variable(node['node'], 'ansible_host', ip) - - # add LXC/Qemu groups for the node - for ittype in ('lxc', 'qemu'): - node_type_group = self._group('%s_%s' % (node['node'], ittype)) - self.inventory.add_group(node_type_group) - - # get LXC containers and Qemu VMs for this node - lxc_objects = zip(itertools.repeat('lxc'), self._get_lxc_per_node(node['node'])) - qemu_objects = zip(itertools.repeat('qemu'), self._get_qemu_per_node(node['node'])) - for ittype, item in itertools.chain(lxc_objects, qemu_objects): - name = self._handle_item(node['node'], ittype, item) - if name is not None: - hosts.append(name) - - # gather vm's in pools - self._populate_pool_groups(hosts) - - def parse(self, inventory, loader, path, cache=True): - if not HAS_REQUESTS: - raise AnsibleError('This module requires Python Requests 1.1.0 or higher: ' - 'https://github.com/psf/requests.') - - super(InventoryModule, self).parse(inventory, loader, path) - - # read config from file, this sets 'options' - self._read_config_data(path) - - # read and template auth options - for o in ('url', 'user', 'password', 'token_id', 'token_secret'): - v = self.get_option(o) - if self.templar.is_template(v): - v = self.templar.template(v, disable_lookups=False) - setattr(self, 'proxmox_%s' % o, v) - - # some more cleanup and validation - self.proxmox_url = 
self.proxmox_url.rstrip('/') - - if self.proxmox_password is None and (self.proxmox_token_id is None or self.proxmox_token_secret is None): - raise AnsibleError('You must specify either a password or both token_id and token_secret.') - - if self.get_option('qemu_extended_statuses') and not self.get_option('want_facts'): - raise AnsibleError('You must set want_facts to True if you want to use qemu_extended_statuses.') - - # read rest of options - self.cache_key = self.get_cache_key(path) - self.use_cache = cache and self.get_option('cache') - self.host_filters = self.get_option('filters') - self.group_prefix = self.get_option('group_prefix') - self.facts_prefix = self.get_option('facts_prefix') - self.strict = self.get_option('strict') - - # actually populate inventory - self._populate() diff --git a/plugins/inventory/scaleway.py b/plugins/inventory/scaleway.py index 6aacc9f665..59c19b498b 100644 --- a/plugins/inventory/scaleway.py +++ b/plugins/inventory/scaleway.py @@ -1,80 +1,85 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = r''' - name: scaleway - author: - - Remy Leone (@remyleone) - short_description: Scaleway inventory source +DOCUMENTATION = r""" +name: scaleway +author: + - Remy Leone (@remyleone) +short_description: Scaleway inventory source +description: + - Get inventory hosts from Scaleway. +requirements: + - PyYAML +options: + plugin: + description: Token that ensures this is a source file for the 'scaleway' plugin. + required: true + type: string + choices: ['scaleway', 'community.general.scaleway'] + regions: + description: Filter results on a specific Scaleway region. 
+ type: list + elements: string + default: + - ams1 + - ams2 + - ams3 + - par1 + - par2 + - par3 + - waw1 + - waw2 + - waw3 + tags: + description: Filter results on a specific tag. + type: list + elements: string + scw_profile: description: - - Get inventory hosts from Scaleway. - requirements: - - PyYAML - options: - plugin: - description: Token that ensures this is a source file for the 'scaleway' plugin. - required: true - choices: ['scaleway', 'community.general.scaleway'] - regions: - description: Filter results on a specific Scaleway region. - type: list - elements: string - default: - - ams1 - - par1 - - par2 - - waw1 - tags: - description: Filter results on a specific tag. - type: list - elements: string - scw_profile: - description: - - The config profile to use in config file. - - By default uses the one specified as C(active_profile) in the config file, or falls back to C(default) if that is not defined. - type: string - version_added: 4.4.0 - oauth_token: - description: - - Scaleway OAuth token. - - If not explicitly defined or in environment variables, it will try to lookup in the scaleway-cli configuration file - (C($SCW_CONFIG_PATH), C($XDG_CONFIG_HOME/scw/config.yaml), or C(~/.config/scw/config.yaml)). - - More details on L(how to generate token, https://www.scaleway.com/en/docs/generate-api-keys/). - env: - # in order of precedence - - name: SCW_TOKEN - - name: SCW_API_KEY - - name: SCW_OAUTH_TOKEN - hostnames: - description: List of preference about what to use as an hostname. - type: list - elements: string - default: - - public_ipv4 - choices: - - public_ipv4 - - private_ipv4 - - public_ipv6 - - hostname - - id - variables: - description: 'Set individual variables: keys are variable names and - values are templates. Any value returned by the - L(Scaleway API, https://developer.scaleway.com/#servers-server-get) - can be used.' - type: dict -''' + - The config profile to use in config file. 
+ - By default uses the one specified as C(active_profile) in the config file, or falls back to V(default) if that is + not defined. + type: string + version_added: 4.4.0 + oauth_token: + description: + - Scaleway OAuth token. + - If not explicitly defined or in environment variables, it tries to lookup in the C(scaleway-cli) configuration file + (C($SCW_CONFIG_PATH), C($XDG_CONFIG_HOME/scw/config.yaml), or C(~/.config/scw/config.yaml)). + - More details on L(how to generate token, https://www.scaleway.com/en/docs/generate-api-keys/). + type: string + env: + # in order of precedence + - name: SCW_TOKEN + - name: SCW_API_KEY + - name: SCW_OAUTH_TOKEN + hostnames: + description: List of preference about what to use as an hostname. + type: list + elements: string + default: + - public_ipv4 + choices: + - public_ipv4 + - private_ipv4 + - public_ipv6 + - hostname + - id + variables: + description: 'Set individual variables: keys are variable names and values are templates. Any value returned by the L(Scaleway + API, https://developer.scaleway.com/#servers-server-get) can be used.' 
+ type: dict +""" -EXAMPLES = r''' +EXAMPLES = r""" # scaleway_inventory.yml file in YAML format # Example command line: ansible-inventory --list -i scaleway_inventory.yml +--- # use hostname as inventory_hostname # use the private IP address to connect to the host plugin: community.general.scaleway @@ -89,6 +94,7 @@ variables: ansible_host: private_ip state: state +--- # use hostname as inventory_hostname and public IP address to connect to the host plugin: community.general.scaleway hostnames: @@ -98,6 +104,7 @@ regions: variables: ansible_host: public_ip.address +--- # Using static strings as variables plugin: community.general.scaleway hostnames: @@ -106,7 +113,7 @@ variables: ansible_host: public_ip.address ansible_connection: "'ssh'" ansible_user: "'admin'" -''' +""" import os import json @@ -121,11 +128,11 @@ else: from ansible.errors import AnsibleError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable from ansible_collections.community.general.plugins.module_utils.scaleway import SCALEWAY_LOCATION, parse_pagination_link +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe from ansible.module_utils.urls import open_url -from ansible.module_utils.common.text.converters import to_native, to_text -from ansible.module_utils.six import raise_from +from ansible.module_utils.common.text.converters import to_text -import ansible.module_utils.six.moves.urllib.parse as urllib_parse +import urllib.parse as urllib_parse def _fetch_information(token, url): @@ -137,7 +144,7 @@ def _fetch_information(token, url): headers={'X-Auth-Token': token, 'Content-type': 'application/json'}) except Exception as e: - raise AnsibleError("Error while fetching %s: %s" % (url, to_native(e))) + raise AnsibleError(f"Error while fetching {url}: {e}") try: raw_json = json.loads(to_text(response.read())) except ValueError: @@ -158,7 +165,7 @@ def _fetch_information(token, url): def _build_server_url(api_endpoint): - return 
"/".join([api_endpoint, "servers"]) + return f"{api_endpoint}/servers" def extract_public_ipv4(server_info): @@ -279,7 +286,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): zone_info = SCALEWAY_LOCATION[zone] url = _build_server_url(zone_info["api_endpoint"]) - raw_zone_hosts_infos = _fetch_information(url=url, token=token) + raw_zone_hosts_infos = make_unsafe(_fetch_information(url=url, token=token)) for host_infos in raw_zone_hosts_infos: @@ -329,7 +336,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): def parse(self, inventory, loader, path, cache=True): if YAML_IMPORT_ERROR: - raise_from(AnsibleError('PyYAML is probably missing'), YAML_IMPORT_ERROR) + raise AnsibleError('PyYAML is probably missing') from YAML_IMPORT_ERROR super(InventoryModule, self).parse(inventory, loader, path) self._read_config_data(path=path) @@ -341,4 +348,4 @@ class InventoryModule(BaseInventoryPlugin, Constructable): hostname_preference = self.get_option("hostnames") for zone in self._get_zones(config_zones): - self.do_zone_inventory(zone=zone, token=token, tags=tags, hostname_preferences=hostname_preference) + self.do_zone_inventory(zone=make_unsafe(zone), token=token, tags=tags, hostname_preferences=hostname_preference) diff --git a/plugins/inventory/stackpath_compute.py b/plugins/inventory/stackpath_compute.py deleted file mode 100644 index 39f880e820..0000000000 --- a/plugins/inventory/stackpath_compute.py +++ /dev/null @@ -1,283 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2020 Shay Rybak -# Copyright (c) 2020 Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - name: stackpath_compute - short_description: StackPath Edge Computing inventory source - version_added: 1.2.0 - author: - - UNKNOWN (@shayrybak) - 
extends_documentation_fragment: - - inventory_cache - - constructed - description: - - Get inventory hosts from StackPath Edge Computing. - - Uses a YAML configuration file that ends with stackpath_compute.(yml|yaml). - options: - plugin: - description: - - A token that ensures this is a source file for the plugin. - required: true - choices: ['community.general.stackpath_compute'] - client_id: - description: - - An OAuth client ID generated from the API Management section of the StackPath customer portal - U(https://control.stackpath.net/api-management). - required: true - type: str - client_secret: - description: - - An OAuth client secret generated from the API Management section of the StackPath customer portal - U(https://control.stackpath.net/api-management). - required: true - type: str - stack_slugs: - description: - - A list of Stack slugs to query instances in. If no entry then get instances in all stacks on the account. - type: list - elements: str - use_internal_ip: - description: - - Whether or not to use internal IP addresses, If false, uses external IP addresses, internal otherwise. - - If an instance doesn't have an external IP it will not be returned when this option is set to false. - type: bool -''' - -EXAMPLES = ''' -# Example using credentials to fetch all workload instances in a stack. 
---- -plugin: community.general.stackpath_compute -client_id: my_client_id -client_secret: my_client_secret -stack_slugs: -- my_first_stack_slug -- my_other_stack_slug -use_internal_ip: false -''' - -import traceback -import json - -from ansible.errors import AnsibleError -from ansible.module_utils.urls import open_url -from ansible.plugins.inventory import ( - BaseInventoryPlugin, - Constructable, - Cacheable -) -from ansible.utils.display import Display - - -display = Display() - - -class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): - - NAME = 'community.general.stackpath_compute' - - def __init__(self): - super(InventoryModule, self).__init__() - - # credentials - self.client_id = None - self.client_secret = None - self.stack_slug = None - self.api_host = "https://gateway.stackpath.com" - self.group_keys = [ - "stackSlug", - "workloadId", - "cityCode", - "countryCode", - "continent", - "target", - "name", - "workloadSlug" - ] - - def _validate_config(self, config): - if config['plugin'] != 'community.general.stackpath_compute': - raise AnsibleError("plugin doesn't match this plugin") - try: - client_id = config['client_id'] - if len(client_id) != 32: - raise AnsibleError("client_id must be 32 characters long") - except KeyError: - raise AnsibleError("config missing client_id, a required option") - try: - client_secret = config['client_secret'] - if len(client_secret) != 64: - raise AnsibleError("client_secret must be 64 characters long") - except KeyError: - raise AnsibleError("config missing client_id, a required option") - return True - - def _set_credentials(self): - ''' - :param config_data: contents of the inventory config file - ''' - self.client_id = self.get_option('client_id') - self.client_secret = self.get_option('client_secret') - - def _authenticate(self): - payload = json.dumps( - { - "client_id": self.client_id, - "client_secret": self.client_secret, - "grant_type": "client_credentials", - } - ) - headers = { - "Content-Type": 
"application/json", - } - resp = open_url( - self.api_host + '/identity/v1/oauth2/token', - headers=headers, - data=payload, - method="POST" - ) - status_code = resp.code - if status_code == 200: - body = resp.read() - self.auth_token = json.loads(body)["access_token"] - - def _query(self): - results = [] - workloads = [] - self._authenticate() - for stack_slug in self.stack_slugs: - try: - workloads = self._stackpath_query_get_list(self.api_host + '/workload/v1/stacks/' + stack_slug + '/workloads') - except Exception: - raise AnsibleError("Failed to get workloads from the StackPath API: %s" % traceback.format_exc()) - for workload in workloads: - try: - workload_instances = self._stackpath_query_get_list( - self.api_host + '/workload/v1/stacks/' + stack_slug + '/workloads/' + workload["id"] + '/instances' - ) - except Exception: - raise AnsibleError("Failed to get workload instances from the StackPath API: %s" % traceback.format_exc()) - for instance in workload_instances: - if instance["phase"] == "RUNNING": - instance["stackSlug"] = stack_slug - instance["workloadId"] = workload["id"] - instance["workloadSlug"] = workload["slug"] - instance["cityCode"] = instance["location"]["cityCode"] - instance["countryCode"] = instance["location"]["countryCode"] - instance["continent"] = instance["location"]["continent"] - instance["target"] = instance["metadata"]["labels"]["workload.platform.stackpath.net/target-name"] - try: - if instance[self.hostname_key]: - results.append(instance) - except KeyError: - pass - return results - - def _populate(self, instances): - for instance in instances: - for group_key in self.group_keys: - group = group_key + "_" + instance[group_key] - group = group.lower().replace(" ", "_").replace("-", "_") - self.inventory.add_group(group) - self.inventory.add_host(instance[self.hostname_key], - group=group) - - def _stackpath_query_get_list(self, url): - self._authenticate() - headers = { - "Content-Type": "application/json", - "Authorization": 
"Bearer " + self.auth_token, - } - next_page = True - result = [] - cursor = '-1' - while next_page: - resp = open_url( - url + '?page_request.first=10&page_request.after=%s' % cursor, - headers=headers, - method="GET" - ) - status_code = resp.code - if status_code == 200: - body = resp.read() - body_json = json.loads(body) - result.extend(body_json["results"]) - next_page = body_json["pageInfo"]["hasNextPage"] - if next_page: - cursor = body_json["pageInfo"]["endCursor"] - return result - - def _get_stack_slugs(self, stacks): - self.stack_slugs = [stack["slug"] for stack in stacks] - - def verify_file(self, path): - ''' - :param loader: an ansible.parsing.dataloader.DataLoader object - :param path: the path to the inventory config file - :return the contents of the config file - ''' - if super(InventoryModule, self).verify_file(path): - if path.endswith(('stackpath_compute.yml', 'stackpath_compute.yaml')): - return True - display.debug( - "stackpath_compute inventory filename must end with \ - 'stackpath_compute.yml' or 'stackpath_compute.yaml'" - ) - return False - - def parse(self, inventory, loader, path, cache=True): - - super(InventoryModule, self).parse(inventory, loader, path) - - config = self._read_config_data(path) - self._validate_config(config) - self._set_credentials() - - # get user specifications - self.use_internal_ip = self.get_option('use_internal_ip') - if self.use_internal_ip: - self.hostname_key = "ipAddress" - else: - self.hostname_key = "externalIpAddress" - - self.stack_slugs = self.get_option('stack_slugs') - if not self.stack_slugs: - try: - stacks = self._stackpath_query_get_list(self.api_host + '/stack/v1/stacks') - self._get_stack_slugs(stacks) - except Exception: - raise AnsibleError("Failed to get stack IDs from the Stackpath API: %s" % traceback.format_exc()) - - cache_key = self.get_cache_key(path) - # false when refresh_cache or --flush-cache is used - if cache: - # get the user-specified directive - cache = 
self.get_option('cache') - - # Generate inventory - cache_needs_update = False - if cache: - try: - results = self._cache[cache_key] - except KeyError: - # if cache expires or cache file doesn't exist - cache_needs_update = True - - if not cache or cache_needs_update: - results = self._query() - - self._populate(results) - - # If the cache has expired/doesn't exist or - # if refresh_inventory/flush cache is used - # when the user is using caching, update the cached inventory - try: - if cache_needs_update or (not cache and self.get_option('cache')): - self._cache[cache_key] = results - except Exception: - raise AnsibleError("Failed to populate data: %s" % traceback.format_exc()) diff --git a/plugins/inventory/virtualbox.py b/plugins/inventory/virtualbox.py index c926d8b449..564db57dac 100644 --- a/plugins/inventory/virtualbox.py +++ b/plugins/inventory/virtualbox.py @@ -1,68 +1,88 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: virtualbox - short_description: virtualbox inventory source +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: virtualbox +short_description: Virtualbox inventory source +description: + - Get inventory hosts from the local virtualbox installation. + - Uses a YAML configuration file that ends with virtualbox.(yml|yaml) or vbox.(yml|yaml). + - The inventory_hostname is always the 'Name' of the virtualbox instance. + - Groups can be assigned to the VMs using C(VBoxManage). Multiple groups can be assigned by using V(/) as a delimeter. + - A separate parameter, O(enable_advanced_group_parsing) is exposed to change grouping behaviour. See the parameter documentation + for details. 
+extends_documentation_fragment: + - constructed + - inventory_cache +options: + plugin: + description: Token that ensures this is a source file for the P(community.general.virtualbox#inventory) plugin. + type: string + required: true + choices: ['virtualbox', 'community.general.virtualbox'] + running_only: + description: Toggles showing all VMs instead of only those currently running. + type: boolean + default: false + settings_password_file: + description: Provide a file containing the settings password (equivalent to C(--settingspwfile)). + type: string + network_info_path: + description: Property path to query for network information (C(ansible_host)). + type: string + default: "/VirtualBox/GuestInfo/Net/0/V4/IP" + query: + description: Create vars from virtualbox properties. + type: dictionary + default: {} + enable_advanced_group_parsing: description: - - Get inventory hosts from the local virtualbox installation. - - Uses a YAML configuration file that ends with virtualbox.(yml|yaml) or vbox.(yml|yaml). - - The inventory_hostname is always the 'Name' of the virtualbox instance. 
- extends_documentation_fragment: - - constructed - - inventory_cache - options: - plugin: - description: token that ensures this is a source file for the 'virtualbox' plugin - required: true - choices: ['virtualbox', 'community.general.virtualbox'] - running_only: - description: toggles showing all vms vs only those currently running - type: boolean - default: false - settings_password_file: - description: provide a file containing the settings password (equivalent to --settingspwfile) - network_info_path: - description: property path to query for network information (ansible_host) - default: "/VirtualBox/GuestInfo/Net/0/V4/IP" - query: - description: create vars from virtualbox properties - type: dictionary - default: {} -''' + - The default group parsing rule (when this setting is set to V(false)) is to split the VirtualBox VM's group based + on the V(/) character and assign the resulting list elements as an Ansible Group. + - Setting O(enable_advanced_group_parsing=true) changes this behaviour to match VirtualBox's interpretation of groups + according to U(https://www.virtualbox.org/manual/UserManual.html#gui-vmgroups). Groups are now split using the V(,) + character, and the V(/) character indicates nested groups. + - When enabled, a VM that's been configured using V(VBoxManage modifyvm "vm01" --groups "/TestGroup/TestGroup2,/TestGroup3") + results in the group C(TestGroup2) being a child group of C(TestGroup); and the VM being a part of C(TestGroup2) + and C(TestGroup3). 
+ default: false + type: bool + version_added: 9.2.0 +""" -EXAMPLES = ''' +EXAMPLES = r""" +--- # file must be named vbox.yaml or vbox.yml -simple_config_file: - plugin: community.general.virtualbox - settings_password_file: /etc/virtulbox/secrets - query: - logged_in_users: /VirtualBox/GuestInfo/OS/LoggedInUsersList - compose: - ansible_connection: ('indows' in vbox_Guest_OS)|ternary('winrm', 'ssh') +plugin: community.general.virtualbox +settings_password_file: /etc/virtualbox/secrets +query: + logged_in_users: /VirtualBox/GuestInfo/OS/LoggedInUsersList +compose: + ansible_connection: ('indows' in vbox_Guest_OS)|ternary('winrm', 'ssh') +--- # add hosts (all match with minishift vm) to the group container if any of the vms are in ansible_inventory' plugin: community.general.virtualbox groups: container: "'minis' in (inventory_hostname)" -''' +""" import os from subprocess import Popen, PIPE from ansible.errors import AnsibleParserError -from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text -from ansible.module_utils.common._collections_compat import MutableMapping +from ansible.module_utils.common.text.converters import to_bytes, to_text +from collections.abc import MutableMapping from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable from ansible.module_utils.common.process import get_bin_path +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe + class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): ''' Host inventory parser for ansible using local virtualbox. 
''' @@ -116,6 +136,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): self._add_host_to_keyed_groups(self.get_option('keyed_groups'), hostvars[host], host, strict=strict) def _populate_from_cache(self, source_data): + source_data = make_unsafe(source_data) hostvars = source_data.pop('_meta', {}).get('hostvars', {}) for group in source_data: if group == 'all': @@ -162,7 +183,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): v = v.strip() # found host if k.startswith('Name') and ',' not in v: # some setting strings appear in Name - current_host = v + current_host = make_unsafe(v) if current_host not in hostvars: hostvars[current_host] = {} self.inventory.add_host(current_host) @@ -170,32 +191,29 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): # try to get network info netdata = self._query_vbox_data(current_host, netinfo) if netdata: - self.inventory.set_variable(current_host, 'ansible_host', netdata) + self.inventory.set_variable(current_host, 'ansible_host', make_unsafe(netdata)) # found groups elif k == 'Groups': - for group in v.split('/'): - if group: - group = self.inventory.add_group(group) - self.inventory.add_child(group, current_host) - if group not in cacheable_results: - cacheable_results[group] = {'hosts': []} - cacheable_results[group]['hosts'].append(current_host) + if self.get_option('enable_advanced_group_parsing'): + self._handle_vboxmanage_group_string(v, current_host, cacheable_results) + else: + self._handle_group_string(v, current_host, cacheable_results) continue else: # found vars, accumulate in hostvars for clean inventory set - pref_k = 'vbox_' + k.strip().replace(' ', '_') + pref_k = make_unsafe(f"vbox_{k.strip().replace(' ', '_')}") leading_spaces = len(k) - len(k.lstrip(' ')) if 0 < leading_spaces <= 2: if prevkey not in hostvars[current_host] or not isinstance(hostvars[current_host][prevkey], dict): hostvars[current_host][prevkey] = {} - 
hostvars[current_host][prevkey][pref_k] = v + hostvars[current_host][prevkey][pref_k] = make_unsafe(v) elif leading_spaces > 2: continue else: if v != '': - hostvars[current_host][pref_k] = v + hostvars[current_host][pref_k] = make_unsafe(v) if self._ungrouped_host(current_host, cacheable_results): if 'ungrouped' not in cacheable_results: cacheable_results['ungrouped'] = {'hosts': []} @@ -223,6 +241,64 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): return all(find_host(host, inventory)) + def _handle_group_string(self, vboxmanage_group, current_host, cacheable_results): + '''Handles parsing the VM's Group assignment from VBoxManage according to this inventory's initial implementation.''' + # The original implementation of this inventory plugin treated `/` as + # a delimeter to split and use as Ansible Groups. + for group in vboxmanage_group.split('/'): + if group: + group = make_unsafe(group) + group = self.inventory.add_group(group) + self.inventory.add_child(group, current_host) + if group not in cacheable_results: + cacheable_results[group] = {'hosts': []} + cacheable_results[group]['hosts'].append(current_host) + + def _handle_vboxmanage_group_string(self, vboxmanage_group, current_host, cacheable_results): + '''Handles parsing the VM's Group assignment from VBoxManage according to VirtualBox documentation.''' + # Per the VirtualBox documentation, a VM can be part of many groups, + # and it is possible to have nested groups. + # Many groups are separated by commas ",", and nested groups use + # slash "/". + # https://www.virtualbox.org/manual/UserManual.html#gui-vmgroups + # Multi groups: VBoxManage modifyvm "vm01" --groups "/TestGroup,/TestGroup2" + # Nested groups: VBoxManage modifyvm "vm01" --groups "/TestGroup/TestGroup2" + + for group in vboxmanage_group.split(','): + if not group: + # We could get an empty element due how to split works, and + # possible assignments from VirtualBox. e.g. 
,/Group1 + continue + + if group == "/": + # This is the "root" group. We get here if the VM was not + # assigned to a particular group. Consider the host to be + # unassigned to a group. + continue + + parent_group = None + for subgroup in group.split('/'): + if not subgroup: + # Similarly to above, we could get an empty element. + # e.g //Group1 + continue + + if subgroup == '/': + # "root" group. + # Consider the host to be unassigned + continue + + subgroup = make_unsafe(subgroup) + subgroup = self.inventory.add_group(subgroup) + if parent_group is not None: + self.inventory.add_child(parent_group, subgroup) + self.inventory.add_child(subgroup, current_host) + if subgroup not in cacheable_results: + cacheable_results[subgroup] = {'hosts': []} + cacheable_results[subgroup]['hosts'].append(current_host) + + parent_group = subgroup + def verify_file(self, path): valid = False @@ -276,7 +352,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): try: p = Popen(cmd, stdout=PIPE) except Exception as e: - raise AnsibleParserError(to_native(e)) + raise AnsibleParserError(str(e)) source_data = p.stdout.read().splitlines() diff --git a/plugins/inventory/xen_orchestra.py b/plugins/inventory/xen_orchestra.py index 5a466a6ab0..fc0f0db757 100644 --- a/plugins/inventory/xen_orchestra.py +++ b/plugins/inventory/xen_orchestra.py @@ -1,67 +1,84 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2021 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: xen_orchestra - short_description: Xen Orchestra inventory source - version_added: 4.1.0 - author: - - Dom Del Nano (@ddelnano) - - Samori Gorse (@shinuza) - requirements: - - websocket-client >= 1.0.0 +DOCUMENTATION = r""" +name: xen_orchestra 
+short_description: Xen Orchestra inventory source +version_added: 4.1.0 +author: + - Dom Del Nano (@ddelnano) + - Samori Gorse (@shinuza) +requirements: + - websocket-client >= 1.0.0 +description: + - Get inventory hosts from a Xen Orchestra deployment. + - Uses a configuration file as an inventory source, it must end in C(.xen_orchestra.yml) or C(.xen_orchestra.yaml). +extends_documentation_fragment: + - constructed + - inventory_cache +options: + plugin: + description: The name of this plugin, it should always be set to V(community.general.xen_orchestra) for this plugin to + recognize it as its own. + required: true + choices: ['community.general.xen_orchestra'] + type: str + api_host: description: - - Get inventory hosts from a Xen Orchestra deployment. - - 'Uses a configuration file as an inventory source, it must end in C(.xen_orchestra.yml) or C(.xen_orchestra.yaml).' - extends_documentation_fragment: - - constructed - - inventory_cache - options: - plugin: - description: The name of this plugin, it should always be set to C(community.general.xen_orchestra) for this plugin to recognize it as its own. - required: true - choices: ['community.general.xen_orchestra'] - type: str - api_host: - description: - - API host to XOA API. - - If the value is not specified in the inventory configuration, the value of environment variable C(ANSIBLE_XO_HOST) will be used instead. - type: str - env: - - name: ANSIBLE_XO_HOST - user: - description: - - Xen Orchestra user. - - If the value is not specified in the inventory configuration, the value of environment variable C(ANSIBLE_XO_USER) will be used instead. - required: true - type: str - env: - - name: ANSIBLE_XO_USER - password: - description: - - Xen Orchestra password. - - If the value is not specified in the inventory configuration, the value of environment variable C(ANSIBLE_XO_PASSWORD) will be used instead. 
- required: true - type: str - env: - - name: ANSIBLE_XO_PASSWORD - validate_certs: - description: Verify TLS certificate if using HTTPS. - type: boolean - default: true - use_ssl: - description: Use wss when connecting to the Xen Orchestra API - type: boolean - default: true -''' + - API host to XOA API. + - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_HOST) + is used instead. + type: str + env: + - name: ANSIBLE_XO_HOST + user: + description: + - Xen Orchestra user. + - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_USER) + is used instead. + required: true + type: str + env: + - name: ANSIBLE_XO_USER + password: + description: + - Xen Orchestra password. + - If the value is not specified in the inventory configuration, the value of environment variable E(ANSIBLE_XO_PASSWORD) + is used instead. + required: true + type: str + env: + - name: ANSIBLE_XO_PASSWORD + validate_certs: + description: Verify TLS certificate if using HTTPS. + type: boolean + default: true + use_ssl: + description: Use wss when connecting to the Xen Orchestra API. + type: boolean + default: true + use_vm_uuid: + description: + - Import Xen VMs to inventory using their UUID as the VM entry name. + - If set to V(false) use VM name labels instead of UUIDs. + type: boolean + default: true + version_added: 10.4.0 + use_host_uuid: + description: + - Import Xen Hosts to inventory using their UUID as the Host entry name. + - If set to V(false) use Host name labels instead of UUIDs. 
+ type: boolean + default: true + version_added: 10.4.0 +""" -EXAMPLES = ''' +EXAMPLES = r""" +--- # file must be named xen_orchestra.yaml or xen_orchestra.yml plugin: community.general.xen_orchestra api_host: 192.168.1.255 @@ -70,19 +87,22 @@ password: xo_pwd validate_certs: true use_ssl: true groups: - kube_nodes: "'kube_node' in tags" + kube_nodes: "'kube_node' in tags" compose: - ansible_port: 2222 - -''' + ansible_port: 2222 +use_vm_uuid: false +use_host_uuid: true +""" import json import ssl +from time import sleep from ansible.errors import AnsibleError from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable from ansible_collections.community.general.plugins.module_utils.version import LooseVersion +from ansible_collections.community.general.plugins.plugin_utils.unsafe import make_unsafe # 3rd party imports try: @@ -136,27 +156,45 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): sslopt = None if validate_certs else {'cert_reqs': ssl.CERT_NONE} self.conn = create_connection( - '{0}://{1}/api/'.format(proto, xoa_api_host), sslopt=sslopt) + f'{proto}://{xoa_api_host}/api/', sslopt=sslopt) + + CALL_TIMEOUT = 100 + """Number of 1/10ths of a second to wait before method call times out.""" + + def call(self, method, params): + """Calls a method on the XO server with the provided parameters.""" + id = self.pointer + self.conn.send(json.dumps({ + 'id': id, + 'jsonrpc': '2.0', + 'method': method, + 'params': params + })) + + waited = 0 + while waited < self.CALL_TIMEOUT: + response = json.loads(self.conn.recv()) + if 'id' in response and response['id'] == id: + return response + else: + sleep(0.1) + waited += 1 + + raise AnsibleError(f'Method call {method} timed out after {self.CALL_TIMEOUT / 10} seconds.') def login(self, user, password): - payload = {'id': self.pointer, 'jsonrpc': '2.0', 'method': 'session.signIn', 'params': { - 'username': user, 'password': password}} - self.conn.send(json.dumps(payload)) - result 
= json.loads(self.conn.recv()) + result = self.call('session.signIn', { + 'username': user, 'password': password + }) if 'error' in result: - raise AnsibleError( - 'Could not connect: {0}'.format(result['error'])) + raise AnsibleError(f"Could not connect: {result['error']}") def get_object(self, name): - payload = {'id': self.pointer, 'jsonrpc': '2.0', - 'method': 'xo.getAllObjects', 'params': {'filter': {'type': name}}} - self.conn.send(json.dumps(payload)) - answer = json.loads(self.conn.recv()) + answer = self.call('xo.getAllObjects', {'filter': {'type': name}}) if 'error' in answer: - raise AnsibleError( - 'Could not request: {0}'.format(answer['error'])) + raise AnsibleError(f"Could not request: {answer['error']}") return answer['result'] @@ -177,10 +215,20 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): self._set_composite_vars(self.get_option('compose'), variables, name, strict=strict) def _add_vms(self, vms, hosts, pools): + vm_name_list = [] for uuid, vm in vms.items(): + if self.vm_entry_name_type == 'name_label': + if vm['name_label'] not in vm_name_list: + entry_name = vm['name_label'] + vm_name_list.append(vm['name_label']) + else: + vm_duplicate_count = vm_name_list.count(vm['name_label']) + entry_name = f"{vm['name_label']}_{vm_duplicate_count}" + vm_name_list.append(vm['name_label']) + else: + entry_name = uuid group = 'with_ip' ip = vm.get('mainIpAddress') - entry_name = uuid power_state = vm['power_state'].lower() pool_name = self._pool_group_name_for_uuid(pools, vm['$poolId']) host_name = self._host_group_name_for_uuid(hosts, vm['$container']) @@ -227,10 +275,20 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): self._apply_constructable(entry_name, self.inventory.get_host(entry_name).get_vars()) def _add_hosts(self, hosts, pools): + host_name_list = [] for host in hosts.values(): - entry_name = host['uuid'] - group_name = 'xo_host_{0}'.format( - clean_group_name(host['name_label'])) + if 
self.host_entry_name_type == 'name_label': + if host['name_label'] not in host_name_list: + entry_name = host['name_label'] + host_name_list.append(host['name_label']) + else: + host_duplicate_count = host_name_list.count(host['name_label']) + entry_name = f"{host['name_label']}_{host_duplicate_count}" + host_name_list.append(host['name_label']) + else: + entry_name = host['uuid'] + + group_name = f"xo_host_{clean_group_name(host['name_label'])}" pool_name = self._pool_group_name_for_uuid(pools, host['$poolId']) self.inventory.add_group(group_name) @@ -253,15 +311,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): entry_name, 'product_brand', host['productBrand']) for pool in pools.values(): - group_name = 'xo_pool_{0}'.format( - clean_group_name(pool['name_label'])) + group_name = f"xo_pool_{clean_group_name(pool['name_label'])}" self.inventory.add_group(group_name) def _add_pools(self, pools): for pool in pools.values(): - group_name = 'xo_pool_{0}'.format( - clean_group_name(pool['name_label'])) + group_name = f"xo_pool_{clean_group_name(pool['name_label'])}" self.inventory.add_group(group_name) @@ -269,16 +325,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): def _pool_group_name_for_uuid(self, pools, pool_uuid): for pool in pools: if pool == pool_uuid: - return 'xo_pool_{0}'.format( - clean_group_name(pools[pool_uuid]['name_label'])) + return f"xo_pool_{clean_group_name(pools[pool_uuid]['name_label'])}" # TODO: Refactor def _host_group_name_for_uuid(self, hosts, host_uuid): for host in hosts: if host == host_uuid: - return 'xo_host_{0}'.format( - clean_group_name(hosts[host_uuid]['name_label'] - )) + return f"xo_host_{clean_group_name(hosts[host_uuid]['name_label'])}" def _populate(self, objects): # Prepare general groups @@ -324,5 +377,13 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): if not self.get_option('use_ssl'): self.protocol = 'ws' + self.vm_entry_name_type = 'uuid' + if not 
self.get_option('use_vm_uuid'): + self.vm_entry_name_type = 'name_label' + + self.host_entry_name_type = 'uuid' + if not self.get_option('use_host_uuid'): + self.host_entry_name_type = 'name_label' + objects = self._get_objects() - self._populate(objects) + self._populate(make_unsafe(objects)) diff --git a/plugins/lookup/binary_file.py b/plugins/lookup/binary_file.py new file mode 100644 index 0000000000..3236ade3e4 --- /dev/null +++ b/plugins/lookup/binary_file.py @@ -0,0 +1,113 @@ +# +# Copyright (c) 2025, Felix Fontein +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +DOCUMENTATION = r""" +name: binary_file +author: Felix Fontein (@felixfontein) +short_description: Read binary file and return it Base64 encoded +version_added: 11.2.0 +description: + - This lookup returns the contents from a file on the Ansible controller's file system. + - The file is read as a binary file and its contents are returned Base64 encoded. + This is similar to using P(ansible.builtin.file#lookup) combined with P(ansible.builtin.b64encode#filter), + except that P(ansible.builtin.file#lookup) does not support binary files as it interprets the contents as UTF-8, + which can cause the wrong content being Base64 encoded. +options: + _terms: + description: + - Paths of the files to read. + - Relative paths will be searched for in different places. See R(Ansible task paths, playbook_task_paths) for more details. + required: true + type: list + elements: str + not_exist: + description: + - Determine how to react if the specified file cannot be found. + type: str + choices: + error: Raise an error. + empty: Return an empty string for the file. + empty_str: + - Return the string C(empty) for the file. + - This cannot be confused with Base64 encoding due to the missing padding. 
+ default: error +notes: + - This lookup does not understand 'globbing' - use the P(ansible.builtin.fileglob#lookup) lookup instead. +seealso: + - plugin: ansible.builtin.b64decode + plugin_type: filter + description: >- + The b64decode filter can be used to decode Base64 encoded data. + Note that Ansible cannot handle binary data, the data will be interpreted as UTF-8 text! + - plugin: ansible.builtin.file + plugin_type: lookup + description: You can use this lookup plugin to read text files from the Ansible controller. + - module: ansible.builtin.slurp + description: >- + Also allows to read binary files Base64 encoded, but from remote targets. + With C(delegate_to: localhost) can be redirected to run on the controller, but you have to know the path to the file to read. + Both this plugin and P(ansible.builtin.file#lookup) use some search path logic to for example also find files in the C(files) + directory of a role. + - ref: playbook_task_paths + description: Search paths used for relative files. +""" + +EXAMPLES = r""" +--- +- name: Output Base64 contents of binary files on screen + ansible.builtin.debug: + msg: "Content: {{ lookup('community.general.binary_file', item) }}" + loop: + - some-binary-file.bin +""" + +RETURN = r""" +_raw: + description: + - Base64 encoded content of requested files, or an empty string resp. the string C(empty), depending on the O(not_exist) option. + - This list contains one string per element of O(_terms) in the same order as O(_terms). 
+ type: list + elements: str + returned: success +""" + +import base64 + +from ansible.errors import AnsibleLookupError +from ansible.plugins.lookup import LookupBase + +from ansible.utils.display import Display + +display = Display() + + +class LookupModule(LookupBase): + + def run(self, terms, variables=None, **kwargs): + self.set_options(var_options=variables, direct=kwargs) + not_exist = self.get_option("not_exist") + + result = [] + for term in terms: + display.debug(f"Searching for binary file: {term!r}") + path = self.find_file_in_search_path(variables, "files", term, ignore_missing=(not_exist != "error")) + display.vvvv(f"community.general.binary_file lookup using {path} as file") + + if not path: + if not_exist == "empty": + result.append("") + continue + if not_exist == "empty_str": + result.append("empty") + continue + raise AnsibleLookupError(f"Could not locate file in community.general.binary_file lookup: {term}") + + try: + with open(path, "rb") as f: + result.append(base64.b64encode(f.read()).decode("utf-8")) + except Exception as exc: + raise AnsibleLookupError(f"Error while reading {path}: {exc}") + + return result diff --git a/plugins/lookup/bitwarden.py b/plugins/lookup/bitwarden.py index 389fa475bd..e4d958a96f 100644 --- a/plugins/lookup/bitwarden.py +++ b/plugins/lookup/bitwarden.py @@ -1,78 +1,126 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2022, Jonathan Lung # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = """ - name: bitwarden - author: - - Jonathan Lung (@lungj) - requirements: - - bw (command line utility) - - be logged into bitwarden - short_description: Retrieve secrets from Bitwarden - version_added: 5.4.0 +DOCUMENTATION = r""" +name: bitwarden +author: + - Jonathan Lung (@lungj) 
+requirements: + - bw (command line utility) + - be logged into bitwarden + - bitwarden vault unlocked + - E(BW_SESSION) environment variable set +short_description: Retrieve secrets from Bitwarden +version_added: 5.4.0 +description: + - Retrieve secrets from Bitwarden. +options: + _terms: + description: Key(s) to fetch values for from login info. + required: true + type: list + elements: str + search: description: - - Retrieve secrets from Bitwarden. - options: - _terms: - description: Key(s) to fetch values for from login info. - required: true - type: list - elements: str - search: - description: Field to retrieve, for example C(name) or C(id). - type: str - default: name - version_added: 5.7.0 - field: - description: Field to fetch. Leave unset to fetch whole response. - type: str - collection_id: - description: Collection ID to filter results by collection. Leave unset to skip filtering. - type: str - version_added: 6.3.0 + - Field to retrieve, for example V(name) or V(id). + - If set to V(id), only zero or one element can be returned. Use the Jinja C(first) filter to get the only list element. + - If set to V(None) or V(''), or if O(_terms) is empty, records are not filtered by fields. + type: str + default: name + version_added: 5.7.0 + field: + description: Field to fetch. Leave unset to fetch whole response. + type: str + collection_id: + description: + - Collection ID to filter results by collection. Leave unset to skip filtering. + - O(collection_id) and O(collection_name) are mutually exclusive. + type: str + version_added: 6.3.0 + collection_name: + description: + - Collection name to filter results by collection. Leave unset to skip filtering. + - O(collection_id) and O(collection_name) are mutually exclusive. + type: str + version_added: 10.4.0 + organization_id: + description: Organization ID to filter results by organization. Leave unset to skip filtering. 
+ type: str + version_added: 8.5.0 + bw_session: + description: Pass session key instead of reading from env. + type: str + version_added: 8.4.0 + result_count: + description: + - Number of results expected for the lookup query. Task fails if O(result_count) is set but does not match the number + of query results. Leave empty to skip this check. + type: int + version_added: 10.4.0 """ -EXAMPLES = """ -- name: "Get 'password' from Bitwarden record named 'a_test'" +EXAMPLES = r""" +- name: "Get 'password' from all Bitwarden records named 'a_test'" ansible.builtin.debug: msg: >- {{ lookup('community.general.bitwarden', 'a_test', field='password') }} -- name: "Get 'password' from Bitwarden record with id 'bafba515-af11-47e6-abe3-af1200cd18b2'" +- name: "Get 'password' from Bitwarden record with ID 'bafba515-af11-47e6-abe3-af1200cd18b2'" ansible.builtin.debug: msg: >- - {{ lookup('community.general.bitwarden', 'bafba515-af11-47e6-abe3-af1200cd18b2', search='id', field='password') }} + {{ lookup('community.general.bitwarden', 'bafba515-af11-47e6-abe3-af1200cd18b2', search='id', field='password') | first }} -- name: "Get 'password' from Bitwarden record named 'a_test' from collection" +- name: "Get 'password' from all Bitwarden records named 'a_test' from collection" ansible.builtin.debug: msg: >- {{ lookup('community.general.bitwarden', 'a_test', field='password', collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }} -- name: "Get full Bitwarden record named 'a_test'" +- name: "Get list of all full Bitwarden records named 'a_test'" ansible.builtin.debug: msg: >- {{ lookup('community.general.bitwarden', 'a_test') }} -- name: "Get custom field 'api_key' from Bitwarden record named 'a_test'" +- name: "Get custom field 'api_key' from all Bitwarden records named 'a_test'" ansible.builtin.debug: msg: >- {{ lookup('community.general.bitwarden', 'a_test', field='api_key') }} + +- name: "Get 'password' from all Bitwarden records named 'a_test', using given session key" + 
ansible.builtin.debug: + msg: >- + {{ lookup('community.general.bitwarden', 'a_test', field='password', bw_session='bXZ9B5TXi6...') }} + +- name: "Get all Bitwarden records from collection" + ansible.builtin.debug: + msg: >- + {{ lookup('community.general.bitwarden', None, collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }} + +- name: "Get all Bitwarden records from collection" + ansible.builtin.debug: + msg: >- + {{ lookup('community.general.bitwarden', None, collection_name='my_collections/test_collection') }} + +- name: "Get Bitwarden record named 'a_test', ensure there is exactly one match" + ansible.builtin.debug: + msg: >- + {{ lookup('community.general.bitwarden', 'a_test', result_count=1) }} """ -RETURN = """ - _raw: - description: List of requested field or JSON object of list of matches. - type: list - elements: raw +RETURN = r""" +_raw: + description: + - A one-element list that contains a list of requested fields or JSON objects of matches. + - If you use C(query), you get a list of lists. If you use C(lookup) without C(wantlist=true), this always gets reduced + to a list of field values or JSON objects. 
+ type: list + elements: list """ from subprocess import Popen, PIPE -from ansible.errors import AnsibleError +from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.module_utils.common.text.converters import to_bytes, to_text from ansible.parsing.ajson import AnsibleJSONDecoder from ansible.plugins.lookup import LookupBase @@ -86,11 +134,20 @@ class Bitwarden(object): def __init__(self, path='bw'): self._cli_path = path + self._session = None @property def cli_path(self): return self._cli_path + @property + def session(self): + return self._session + + @session.setter + def session(self, value): + self._session = value + @property def unlocked(self): out, err = self._run(['status'], stdin="") @@ -98,65 +155,141 @@ class Bitwarden(object): return decoded['status'] == 'unlocked' def _run(self, args, stdin=None, expected_rc=0): + if self.session: + args += ['--session', self.session] + p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE) out, err = p.communicate(to_bytes(stdin)) rc = p.wait() if rc != expected_rc: + if len(args) > 2 and args[0] == 'get' and args[1] == 'item' and b'Not found.' in err: + return 'null', '' raise BitwardenException(err) return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict') - def _get_matches(self, search_value, search_field, collection_id): + def _get_matches(self, search_value, search_field, collection_id=None, organization_id=None): """Return matching records whose search_field is equal to key. 
""" # Prepare set of params for Bitwarden CLI - params = ['list', 'items', '--search', search_value] + if search_field == 'id': + params = ['get', 'item', search_value] + else: + params = ['list', 'items'] + if search_value: + params.extend(['--search', search_value]) if collection_id: params.extend(['--collectionid', collection_id]) + if organization_id: + params.extend(['--organizationid', organization_id]) out, err = self._run(params) # This includes things that matched in different fields. initial_matches = AnsibleJSONDecoder().raw_decode(out)[0] - # Filter to only include results from the right field. - return [item for item in initial_matches if item[search_field] == search_value] + if search_field == 'id': + if initial_matches is None: + initial_matches = [] + else: + initial_matches = [initial_matches] - def get_field(self, field, search_value, search_field="name", collection_id=None): + # Filter to only include results from the right field, if a search is requested by value or field + return [item for item in initial_matches + if not search_value or not search_field or item.get(search_field) == search_value] + + def get_field(self, field, search_value, search_field="name", collection_id=None, organization_id=None): """Return a list of the specified field for records whose search_field match search_value and filtered by collection if collection has been provided. If field is None, return the whole record for each match. 
""" - matches = self._get_matches(search_value, search_field, collection_id) - - if field in ['autofillOnPageLoad', 'password', 'passwordRevisionDate', 'totp', 'uris', 'username']: - return [match['login'][field] for match in matches] - elif not field: + matches = self._get_matches(search_value, search_field, collection_id, organization_id) + if not field: return matches - else: - custom_field_matches = [] - for match in matches: + field_matches = [] + for match in matches: + # if there are no custom fields, then `match` has no key 'fields' + if 'fields' in match: + custom_field_found = False for custom_field in match['fields']: - if custom_field['name'] == field: - custom_field_matches.append(custom_field['value']) - if matches and not custom_field_matches: - raise AnsibleError("Custom field {field} does not exist in {search_value}".format(field=field, search_value=search_value)) - return custom_field_matches + if field == custom_field['name']: + field_matches.append(custom_field['value']) + custom_field_found = True + break + if custom_field_found: + continue + if 'login' in match and field in match['login']: + field_matches.append(match['login'][field]) + continue + if field in match: + field_matches.append(match[field]) + continue + + if matches and not field_matches: + raise AnsibleError(f"field {field} does not exist in {search_value}") + + return field_matches + + def get_collection_ids(self, collection_name: str, organization_id=None) -> list[str]: + """Return matching IDs of collections whose name is equal to collection_name.""" + + # Prepare set of params for Bitwarden CLI + params = ['list', 'collections', '--search', collection_name] + + if organization_id: + params.extend(['--organizationid', organization_id]) + + out, err = self._run(params) + + # This includes things that matched in different fields. 
+        initial_matches = AnsibleJSONDecoder().raw_decode(out)[0]
+
+        # Filter to only return the IDs of collections with exactly matching names
+        return [item['id'] for item in initial_matches
+                if str(item.get('name')).lower() == collection_name.lower()]


 class LookupModule(LookupBase):

-    def run(self, terms, variables=None, **kwargs):
+    def run(self, terms=None, variables=None, **kwargs):
         self.set_options(var_options=variables, direct=kwargs)
         field = self.get_option('field')
         search_field = self.get_option('search')
         collection_id = self.get_option('collection_id')
+        collection_name = self.get_option('collection_name')
+        organization_id = self.get_option('organization_id')
+        result_count = self.get_option('result_count')
+        _bitwarden.session = self.get_option('bw_session')
+
         if not _bitwarden.unlocked:
             raise AnsibleError("Bitwarden Vault locked. Run 'bw unlock'.")

-        return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]
+        if not terms:
+            terms = [None]
+
+        if collection_name and collection_id:
+            raise AnsibleOptionsError("'collection_name' and 'collection_id' are mutually exclusive!")
+        elif collection_name:
+            collection_ids = _bitwarden.get_collection_ids(collection_name, organization_id)
+            if not collection_ids:
+                raise BitwardenException("No matching collections found!")
+        else:
+            collection_ids = [collection_id]
+
+        results = [
+            _bitwarden.get_field(field, term, search_field, collection_id, organization_id)
+            for collection_id in collection_ids
+            for term in terms
+        ]
+
+        for result in results:
+            if result_count is not None and len(result) != result_count:
+                raise BitwardenException(
+                    f"Number of results doesn't match result_count! 
({len(result)} != {result_count})") + + return results _bitwarden = Bitwarden() diff --git a/plugins/lookup/bitwarden_secrets_manager.py b/plugins/lookup/bitwarden_secrets_manager.py new file mode 100644 index 0000000000..0227c16bae --- /dev/null +++ b/plugins/lookup/bitwarden_secrets_manager.py @@ -0,0 +1,161 @@ +# Copyright (c) 2023, jantari (https://github.com/jantari) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + + +DOCUMENTATION = r""" +name: bitwarden_secrets_manager +author: + - jantari (@jantari) +requirements: + - bws (command line utility) +short_description: Retrieve secrets from Bitwarden Secrets Manager +version_added: 7.2.0 +description: + - Retrieve secrets from Bitwarden Secrets Manager. +options: + _terms: + description: Secret ID(s) to fetch values for. + required: true + type: list + elements: str + bws_access_token: + description: The BWS access token to use for this lookup. 
+ env: + - name: BWS_ACCESS_TOKEN + required: true + type: str +""" + +EXAMPLES = r""" +- name: Get a secret relying on the BWS_ACCESS_TOKEN environment variable for authentication + ansible.builtin.debug: + msg: >- + {{ lookup("community.general.bitwarden_secrets_manager", "2bc23e48-4932-40de-a047-5524b7ddc972") }} + +- name: Get a secret passing an explicit access token for authentication + ansible.builtin.debug: + msg: >- + {{ + lookup( + "community.general.bitwarden_secrets_manager", + "2bc23e48-4932-40de-a047-5524b7ddc972", + bws_access_token="9.4f570d14-4b54-42f5-bc07-60f4450b1db5.YmluYXJ5LXNvbWV0aGluZy0xMjMK:d2h5IGhlbGxvIHRoZXJlCg==" + ) + }} + +- name: Get two different secrets each using a different access token for authentication + ansible.builtin.debug: + msg: + - '{{ lookup("community.general.bitwarden_secrets_manager", "2bc23e48-4932-40de-a047-5524b7ddc972", bws_access_token=token1) }}' + - '{{ lookup("community.general.bitwarden_secrets_manager", "9d89af4c-eb5d-41f5-bb0f-4ae81215c768", bws_access_token=token2) }}' + vars: + token1: "9.4f570d14-4b54-42f5-bc07-60f4450b1db5.YmluYXJ5LXNvbWV0aGluZy0xMjMK:d2h5IGhlbGxvIHRoZXJlCg==" + token2: "1.69b72797-6ea9-4687-a11e-848e41a30ae6.YW5zaWJsZSBpcyBncmVhdD8K:YW5zaWJsZSBpcyBncmVhdAo=" + +- name: Get just the value of a secret + ansible.builtin.debug: + msg: >- + {{ lookup("community.general.bitwarden_secrets_manager", "2bc23e48-4932-40de-a047-5524b7ddc972").value }} +""" + +RETURN = r""" +_raw: + description: List containing one or more secrets. 
+ type: list + elements: dict +""" + +from subprocess import Popen, PIPE +from time import sleep + +from ansible.errors import AnsibleLookupError +from ansible.module_utils.common.text.converters import to_text +from ansible.parsing.ajson import AnsibleJSONDecoder +from ansible.plugins.lookup import LookupBase + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + + +class BitwardenSecretsManagerException(AnsibleLookupError): + pass + + +class BitwardenSecretsManager(object): + def __init__(self, path='bws'): + self._cli_path = path + self._max_retries = 3 + self._retry_delay = 1 + + @property + def cli_path(self): + return self._cli_path + + def _run_with_retry(self, args, stdin=None, retries=0): + out, err, rc = self._run(args, stdin) + + if rc != 0: + if retries >= self._max_retries: + raise BitwardenSecretsManagerException("Max retries exceeded. Unable to retrieve secret.") + + if "Too many requests" in err: + delay = self._retry_delay * (2 ** retries) + sleep(delay) + return self._run_with_retry(args, stdin, retries + 1) + else: + raise BitwardenSecretsManagerException(f"Command failed with return code {rc}: {err}") + + return out, err, rc + + def _run(self, args, stdin=None): + p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE) + out, err = p.communicate(stdin) + rc = p.wait() + return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict'), rc + + def get_bws_version(self): + """Get the version of the Bitwarden Secrets Manager CLI. + """ + out, err, rc = self._run(['--version']) + if rc != 0: + raise BitwardenSecretsManagerException(to_text(err)) + # strip the prefix and grab the last segment, the version number + return out.split()[-1] + + def get_secret(self, secret_id, bws_access_token): + """Get and return the secret with the given secret_id. 
+ """ + + # Prepare set of params for Bitwarden Secrets Manager CLI + # Color output was not always disabled correctly with the default 'auto' setting so explicitly disable it. + params = [ + '--color', 'no', + '--access-token', bws_access_token + ] + + # bws version 0.3.0 introduced a breaking change in the command line syntax: + # pre-0.3.0: verb noun + # 0.3.0 and later: noun verb + bws_version = self.get_bws_version() + if LooseVersion(bws_version) < LooseVersion('0.3.0'): + params.extend(['get', 'secret', secret_id]) + else: + params.extend(['secret', 'get', secret_id]) + + out, err, rc = self._run_with_retry(params) + if rc != 0: + raise BitwardenSecretsManagerException(to_text(err)) + + return AnsibleJSONDecoder().raw_decode(out)[0] + + +class LookupModule(LookupBase): + def run(self, terms, variables=None, **kwargs): + self.set_options(var_options=variables, direct=kwargs) + bws_access_token = self.get_option('bws_access_token') + + return [_bitwarden_secrets_manager.get_secret(term, bws_access_token) for term in terms] + + +_bitwarden_secrets_manager = BitwardenSecretsManager() diff --git a/plugins/lookup/cartesian.py b/plugins/lookup/cartesian.py index d76e8f532a..1e07326a17 100644 --- a/plugins/lookup/cartesian.py +++ b/plugins/lookup/cartesian.py @@ -1,29 +1,27 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2013, Bradley Young # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: cartesian - short_description: returns the cartesian product of lists +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: cartesian +short_description: Returns the cartesian product of lists +description: + - Takes the input lists and returns a list that 
represents the product of the input lists. + - It is clearer with an example, it turns [1, 2, 3], [a, b] into [1, a], [1, b], [2, a], [2, b], [3, a], [3, b]. + - You can see the exact syntax in the examples section. +options: + _terms: description: - - Takes the input lists and returns a list that represents the product of the input lists. - - It is clearer with an example, it turns [1, 2, 3], [a, b] into [1, a], [1, b], [2, a], [2, b], [3, a], [3, b]. - You can see the exact syntax in the examples section. - options: - _terms: - description: - - a set of lists - type: list - elements: list - required: true -''' + - A set of lists. + type: list + elements: list + required: true +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Example of the change in the description ansible.builtin.debug: msg: "{{ lookup('community.general.cartesian', [1,2,3], [a, b])}}" @@ -34,15 +32,15 @@ EXAMPLES = """ with_community.general.cartesian: - "{{list1}}" - "{{list2}}" - - [1,2,3,4,5,6] + - [1, 2, 3, 4, 5, 6] """ -RETURN = """ - _list: - description: - - list of lists composed of elements of the input lists - type: list - elements: list +RETURN = r""" +_list: + description: + - List of lists composed of elements of the input lists. 
+ type: list + elements: list """ from itertools import product @@ -66,8 +64,7 @@ class LookupModule(LookupBase): """ results = [] for x in terms: - intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader) - results.append(intermediate) + results.append(listify_lookup_plugin_terms(x, templar=self._templar)) return results def run(self, terms, variables=None, **kwargs): diff --git a/plugins/lookup/chef_databag.py b/plugins/lookup/chef_databag.py index b14d924ae8..69a53d007e 100644 --- a/plugins/lookup/chef_databag.py +++ b/plugins/lookup/chef_databag.py @@ -1,45 +1,44 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2016, Josh Bradley # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: chef_databag - short_description: fetches data from a Chef Databag +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: chef_databag +short_description: Fetches data from a Chef Databag +description: + - 'This is a lookup plugin to provide access to chef data bags using the pychef package. It interfaces with the chef server + API using the same methods to find a knife or chef-client config file to load parameters from, starting from either the + given base path or the current working directory. The lookup order mirrors the one from Chef, all folders in the base + path are walked back looking for the following configuration file in order: C(.chef/knife.rb), C(~/.chef/knife.rb), C(/etc/chef/client.rb).' +requirements: + - "pychef (L(Python library, https://pychef.readthedocs.io), C(pip install pychef))" +options: + name: description: - - "This is a lookup plugin to provide access to chef data bags using the pychef package. 
- It interfaces with the chef server api using the same methods to find a knife or chef-client config file to load parameters from, - starting from either the given base path or the current working directory. - The lookup order mirrors the one from Chef, all folders in the base path are walked back looking for the following configuration - file in order : .chef/knife.rb, ~/.chef/knife.rb, /etc/chef/client.rb" - requirements: - - "pychef (L(Python library, https://pychef.readthedocs.io), C(pip install pychef))" - options: - name: - description: - - Name of the databag - required: true - item: - description: - - Item to fetch - required: true -''' - -EXAMPLES = """ - - ansible.builtin.debug: - msg: "{{ lookup('community.general.chef_databag', 'name=data_bag_name item=data_bag_item') }}" + - Name of the databag. + type: string + required: true + item: + description: + - Item to fetch. + type: string + required: true """ -RETURN = """ - _raw: - description: - - The value from the databag. - type: list - elements: dict +EXAMPLES = r""" +- ansible.builtin.debug: + msg: "{{ lookup('community.general.chef_databag', 'name=data_bag_name item=data_bag_item') }}" +""" + +RETURN = r""" +_raw: + description: + - The value from the databag. 
+ type: list + elements: dict """ from ansible.errors import AnsibleError @@ -79,11 +78,11 @@ class LookupModule(LookupBase): setattr(self, arg, parsed) except ValueError: raise AnsibleError( - "can't parse arg {0}={1} as string".format(arg, arg_raw) + f"can't parse arg {arg}={arg_raw} as string" ) if args: raise AnsibleError( - "unrecognized arguments to with_sequence: %r" % list(args.keys()) + f"unrecognized arguments to with_sequence: {list(args.keys())!r}" ) def run(self, terms, variables=None, **kwargs): diff --git a/plugins/lookup/collection_version.py b/plugins/lookup/collection_version.py index 4d25585b81..7a9eaf10bd 100644 --- a/plugins/lookup/collection_version.py +++ b/plugins/lookup/collection_version.py @@ -2,72 +2,67 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = """ +DOCUMENTATION = r""" name: collection_version author: Felix Fontein (@felixfontein) version_added: "4.0.0" short_description: Retrieves the version of an installed collection description: - - This lookup allows to query the version of an installed collection, and to determine whether a - collection is installed at all. - - By default it returns C(none) for non-existing collections and C(*) for collections without a - version number. The latter should only happen in development environments, or when installing - a collection from git which has no version in its C(galaxy.yml). This behavior can be adjusted - by providing other values with I(result_not_found) and I(result_no_version). + - This lookup allows to query the version of an installed collection, and to determine whether a collection is installed + at all. + - By default it returns V(none) for non-existing collections and V(*) for collections without a version number. 
The latter + should only happen in development environments, or when installing a collection from git which has no version in its C(galaxy.yml). + This behavior can be adjusted by providing other values with O(result_not_found) and O(result_no_version). options: _terms: description: - The collections to look for. - - For example C(community.general). + - For example V(community.general). type: list elements: str required: true result_not_found: description: - The value to return when the collection could not be found. - - By default, C(none) is returned. + - By default, V(none) is returned. type: string default: ~ result_no_version: description: - The value to return when the collection has no version number. - - This can happen for collections installed from git which do not have a version number - in C(galaxy.yml). - - By default, C(*) is returned. + - This can happen for collections installed from git which do not have a version number in C(galaxy.yml). + - By default, V(*) is returned. type: string default: '*' """ -EXAMPLES = """ +EXAMPLES = r""" - name: Check version of community.general ansible.builtin.debug: msg: "community.general version {{ lookup('community.general.collection_version', 'community.general') }}" """ -RETURN = """ - _raw: - description: - - The version number of the collections listed as input. - - If a collection can not be found, it will return the value provided in I(result_not_found). - By default, this is C(none). - - If a collection can be found, but the version not identified, it will return the value provided in - I(result_no_version). By default, this is C(*). This can happen for collections installed - from git which do not have a version number in C(galaxy.yml). - type: list - elements: str +RETURN = r""" +_raw: + description: + - The version number of the collections listed as input. + - If a collection can not be found, it returns the value provided in O(result_not_found). By default, this is V(none). 
+ - If a collection can be found, but the version not identified, it returns the value provided in O(result_no_version).
    By default, this is V(*). This can happen for collections installed from git which do not have a version number in C(galaxy.yml).
  type: list
  elements: str
"""

import json
import os
import re
+from importlib import import_module

import yaml

from ansible.errors import AnsibleLookupError
-from ansible.module_utils.compat.importlib import import_module
from ansible.plugins.lookup import LookupBase


@@ -98,15 +93,10 @@ def load_collection_meta(collection_pkg, no_version='*'):
     if os.path.exists(manifest_path):
         return load_collection_meta_manifest(manifest_path)
 
-    # Try to load galaxy.y(a)ml
+    # Try to load galaxy.yml
     galaxy_path = os.path.join(path, 'galaxy.yml')
-    galaxy_alt_path = os.path.join(path, 'galaxy.yaml')
-    # galaxy.yaml was only supported in ansible-base 2.10 and ansible-core 2.11. Support was removed
-    # in https://github.com/ansible/ansible/commit/595413d11346b6f26bb3d9df2d8e05f2747508a3 for
-    # ansible-core 2.12. 
- for path in (galaxy_path, galaxy_alt_path): - if os.path.exists(path): - return load_collection_meta_galaxy(path, no_version=no_version) + if os.path.exists(galaxy_path): + return load_collection_meta_galaxy(galaxy_path, no_version=no_version) return {} @@ -120,10 +110,10 @@ class LookupModule(LookupBase): for term in terms: if not FQCN_RE.match(term): - raise AnsibleLookupError('"{term}" is not a FQCN'.format(term=term)) + raise AnsibleLookupError(f'"{term}" is not a FQCN') try: - collection_pkg = import_module('ansible_collections.{fqcn}'.format(fqcn=term)) + collection_pkg = import_module(f'ansible_collections.{term}') except ImportError: # Collection not found result.append(not_found) @@ -132,7 +122,7 @@ class LookupModule(LookupBase): try: data = load_collection_meta(collection_pkg, no_version=no_version) except Exception as exc: - raise AnsibleLookupError('Error while loading metadata for {fqcn}: {error}'.format(fqcn=term, error=exc)) + raise AnsibleLookupError(f'Error while loading metadata for {term}: {exc}') result.append(data.get('version', no_version)) diff --git a/plugins/lookup/consul_kv.py b/plugins/lookup/consul_kv.py index 2d4a202d94..c9cc3c6399 100644 --- a/plugins/lookup/consul_kv.py +++ b/plugins/lookup/consul_kv.py @@ -1,112 +1,117 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2015, Steve Gargan # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: consul_kv - short_description: Fetch metadata from a Consul key value store. - description: - - Lookup metadata for a playbook from the key value store in a Consul cluster. 
- Values can be easily set in the kv store with simple rest commands - - C(curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata) - requirements: - - 'python-consul python library U(https://python-consul.readthedocs.io/en/latest/#installation)' - options: - _raw: - description: List of key(s) to retrieve. - type: list - elements: string - recurse: - type: boolean - description: If true, will retrieve all the values that have the given key as prefix. - default: false - index: - description: - - If the key has a value with the specified index then this is returned allowing access to historical values. - datacenter: - description: - - Retrieve the key from a consul datacenter other than the default for the consul host. - token: - description: The acl token to allow access to restricted values. - host: - default: localhost - description: - - The target to connect to, must be a resolvable address. - Will be determined from C(ANSIBLE_CONSUL_URL) if that is set. - - "C(ANSIBLE_CONSUL_URL) should look like this: C(https://my.consul.server:8500)" - env: - - name: ANSIBLE_CONSUL_URL - ini: - - section: lookup_consul - key: host - port: - description: - - The port of the target host to connect to. - - If you use C(ANSIBLE_CONSUL_URL) this value will be used from there. - default: 8500 - scheme: - default: http - description: - - Whether to use http or https. - - If you use C(ANSIBLE_CONSUL_URL) this value will be used from there. - validate_certs: - default: true - description: Whether to verify the ssl connection or not. - env: - - name: ANSIBLE_CONSUL_VALIDATE_CERTS - ini: - - section: lookup_consul - key: validate_certs - client_cert: - description: The client cert to verify the ssl connection. - env: - - name: ANSIBLE_CONSUL_CLIENT_CERT - ini: - - section: lookup_consul - key: client_cert - url: - description: "The target to connect to, should look like this: C(https://my.consul.server:8500)." 
- type: str - version_added: 1.0.0 - env: - - name: ANSIBLE_CONSUL_URL - ini: - - section: lookup_consul - key: url -''' - -EXAMPLES = """ - - ansible.builtin.debug: - msg: 'key contains {{item}}' - with_community.general.consul_kv: - - 'key/to/retrieve' - - - name: Parameters can be provided after the key be more specific about what to retrieve - ansible.builtin.debug: - msg: 'key contains {{item}}' - with_community.general.consul_kv: - - 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98' - - - name: retrieving a KV from a remote cluster on non default port - ansible.builtin.debug: - msg: "{{ lookup('community.general.consul_kv', 'my/key', host='10.10.10.10', port='2000') }}" -""" - -RETURN = """ +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: consul_kv +short_description: Fetch metadata from a Consul key value store +description: + - Lookup metadata for a playbook from the key value store in a Consul cluster. Values can be easily set in the kv store + with simple rest commands. + - C(curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata). +requirements: + - 'py-consul python library U(https://github.com/criteo/py-consul?tab=readme-ov-file#installation)' +options: _raw: + description: List of key(s) to retrieve. + type: list + elements: string + recurse: + type: boolean + description: If V(true), retrieves all the values that have the given key as prefix. + default: false + index: description: - - Value(s) stored in consul. - type: dict + - If the key has a value with the specified index then this is returned allowing access to historical values. + type: int + datacenter: + description: + - Retrieve the key from a consul datacenter other than the default for the consul host. + type: str + token: + description: The acl token to allow access to restricted values. + type: str + host: + default: localhost + type: str + description: + - The target to connect to, must be a resolvable address. 
+      - It is determined from E(ANSIBLE_CONSUL_URL) if that is set.
+    ini:
+      - section: lookup_consul
+        key: host
+  port:
+    description:
+      - The port of the target host to connect to.
+      - If you use E(ANSIBLE_CONSUL_URL) this value is used from there.
+    type: int
+    default: 8500
+  scheme:
+    default: http
+    type: str
+    description:
+      - Whether to use http or https.
+      - If you use E(ANSIBLE_CONSUL_URL) this value is used from there.
+  validate_certs:
+    default: true
+    description: Whether to verify the TLS connection or not.
+    type: bool
+    env:
+      - name: ANSIBLE_CONSUL_VALIDATE_CERTS
+    ini:
+      - section: lookup_consul
+        key: validate_certs
+  client_cert:
+    description: The client cert to verify the TLS connection.
+    type: str
+    env:
+      - name: ANSIBLE_CONSUL_CLIENT_CERT
+    ini:
+      - section: lookup_consul
+        key: client_cert
+  url:
+    description:
+      - The target to connect to.
+      - 'Should look like this: V(https://my.consul.server:8500).'
+    type: str
+    version_added: 1.0.0
+    env:
+      - name: ANSIBLE_CONSUL_URL
+    ini:
+      - section: lookup_consul
+        key: url
 """
 
-import os
-from ansible.module_utils.six.moves.urllib.parse import urlparse
+EXAMPLES = r"""
+- ansible.builtin.debug:
+    msg: 'key contains {{item}}'
+  with_community.general.consul_kv:
+    - 'key/to/retrieve'
+
+- name: Parameters can be provided after the key to be more specific about what to retrieve
+  ansible.builtin.debug:
+    msg: 'key contains {{item}}'
+  with_community.general.consul_kv:
+    - 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98'
+
+- name: retrieving a KV from a remote cluster on non default port
+  ansible.builtin.debug:
+    msg: "{{ lookup('community.general.consul_kv', 'my/key', host='10.10.10.10', port=2000) }}"
+"""
+
+RETURN = r"""
+_raw:
+  description:
+    - Value(s) stored in consul.
+ type: dict +""" + +from urllib.parse import urlparse + from ansible.errors import AnsibleError, AnsibleAssertionError from ansible.plugins.lookup import LookupBase from ansible.module_utils.common.text.converters import to_text @@ -125,7 +130,7 @@ class LookupModule(LookupBase): if not HAS_CONSUL: raise AnsibleError( - 'python-consul is required for consul_kv lookup. see http://python-consul.readthedocs.org/en/latest/#installation') + 'py-consul is required for consul_kv lookup. see https://github.com/criteo/py-consul?tab=readme-ov-file#installation') # get options self.set_options(direct=kwargs) @@ -165,7 +170,7 @@ class LookupModule(LookupBase): values.append(to_text(results[1]['Value'])) except Exception as e: raise AnsibleError( - "Error locating '%s' in kv store. Error was %s" % (term, e)) + f"Error locating '{term}' in kv store. Error was {e}") return values @@ -186,7 +191,7 @@ class LookupModule(LookupBase): if param and len(param) > 0: name, value = param.split('=') if name not in paramvals: - raise AnsibleAssertionError("%s not a valid consul lookup parameter" % name) + raise AnsibleAssertionError(f"{name} not a valid consul lookup parameter") paramvals[name] = value except (ValueError, AssertionError) as e: raise AnsibleError(e) diff --git a/plugins/lookup/credstash.py b/plugins/lookup/credstash.py index d49d5b23cb..01e6a1a8fe 100644 --- a/plugins/lookup/credstash.py +++ b/plugins/lookup/credstash.py @@ -1,59 +1,57 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2015, Ensighten # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: credstash - short_description: retrieve secrets from Credstash on AWS - requirements: - - credstash (python 
library) - description: - - "Credstash is a small utility for managing secrets using AWS's KMS and DynamoDB: https://github.com/fugue/credstash" - options: - _terms: - description: term or list of terms to lookup in the credit store - type: list - elements: string - required: true - table: - description: name of the credstash table to query - type: str - default: 'credential-store' - version: - description: Credstash version - type: str - default: '' - region: - description: AWS region - type: str - profile_name: - description: AWS profile to use for authentication - type: str - env: - - name: AWS_PROFILE - aws_access_key_id: - description: AWS access key ID - type: str - env: - - name: AWS_ACCESS_KEY_ID - aws_secret_access_key: - description: AWS access key - type: str - env: - - name: AWS_SECRET_ACCESS_KEY - aws_session_token: - description: AWS session token - type: str - env: - - name: AWS_SESSION_TOKEN -''' +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: credstash +short_description: Retrieve secrets from Credstash on AWS +requirements: + - credstash (python library) +description: + - "Credstash is a small utility for managing secrets using AWS's KMS and DynamoDB: https://github.com/fugue/credstash." +options: + _terms: + description: Term or list of terms to lookup in the credit store. + type: list + elements: string + required: true + table: + description: Name of the credstash table to query. + type: str + default: 'credential-store' + version: + description: Credstash version. + type: str + default: '' + region: + description: AWS region. + type: str + profile_name: + description: AWS profile to use for authentication. + type: str + env: + - name: AWS_PROFILE + aws_access_key_id: + description: AWS access key ID. + type: str + env: + - name: AWS_ACCESS_KEY_ID + aws_secret_access_key: + description: AWS access key. + type: str + env: + - name: AWS_SECRET_ACCESS_KEY + aws_session_token: + description: AWS session token. 
+ type: str + env: + - name: AWS_SESSION_TOKEN +""" -EXAMPLES = """ +EXAMPLES = r""" - name: first use credstash to store your secrets ansible.builtin.shell: credstash put my-github-password secure123 @@ -77,24 +75,22 @@ EXAMPLES = """ environment: production tasks: - - name: "Test credstash lookup plugin -- get the password with a context passed as a variable" - ansible.builtin.debug: - msg: "{{ lookup('community.general.credstash', 'some-password', context=context) }}" + - name: "Test credstash lookup plugin -- get the password with a context passed as a variable" + ansible.builtin.debug: + msg: "{{ lookup('community.general.credstash', 'some-password', context=context) }}" - - name: "Test credstash lookup plugin -- get the password with a context defined here" - ansible.builtin.debug: - msg: "{{ lookup('community.general.credstash', 'some-password', context=dict(app='my_app', environment='production')) }}" + - name: "Test credstash lookup plugin -- get the password with a context defined here" + ansible.builtin.debug: + msg: "{{ lookup('community.general.credstash', 'some-password', context=dict(app='my_app', environment='production')) }}" """ -RETURN = """ - _raw: - description: - - Value(s) stored in Credstash. - type: str +RETURN = r""" +_raw: + description: + - Value(s) stored in Credstash. 
+ type: str """ -import os - from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase @@ -122,10 +118,10 @@ class LookupModule(LookupBase): aws_secret_access_key = self.get_option('aws_secret_access_key') aws_session_token = self.get_option('aws_session_token') - context = dict( - (k, v) for k, v in kwargs.items() + context = { + k: v for k, v in kwargs.items() if k not in ('version', 'region', 'table', 'profile_name', 'aws_access_key_id', 'aws_secret_access_key', 'aws_session_token') - ) + } kwargs_pass = { 'profile_name': profile_name, @@ -139,8 +135,8 @@ class LookupModule(LookupBase): try: ret.append(credstash.getSecret(term, version, region, table, context=context, **kwargs_pass)) except credstash.ItemNotFound: - raise AnsibleError('Key {0} not found'.format(term)) + raise AnsibleError(f'Key {term} not found') except Exception as e: - raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e)) + raise AnsibleError(f'Encountered exception while fetching {term}: {e}') return ret diff --git a/plugins/lookup/cyberarkpassword.py b/plugins/lookup/cyberarkpassword.py index 1e005e23e8..955ba4a89a 100644 --- a/plugins/lookup/cyberarkpassword.py +++ b/plugins/lookup/cyberarkpassword.py @@ -1,63 +1,67 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017, Edward Nunez # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: Unknown (!UNKNOWN) - name: cyberarkpassword - short_description: get secrets from CyberArk AIM - requirements: - - CyberArk AIM tool installed +DOCUMENTATION = r""" +author: Unknown (!UNKNOWN) +name: cyberarkpassword +short_description: Get secrets from CyberArk AIM +requirements: + - CyberArk AIM tool installed 
+description: + - Get secrets from CyberArk AIM. +options: + _command: + description: Cyberark CLI utility. + type: string + env: + - name: AIM_CLIPASSWORDSDK_CMD + default: '/opt/CARKaim/sdk/clipasswordsdk' + appid: + description: Defines the unique ID of the application that is issuing the password request. + type: string + required: true + query: + description: Describes the filter criteria for the password retrieval. + type: string + required: true + output: description: - - Get secrets from CyberArk AIM. - options : - _command: - description: Cyberark CLI utility. - env: - - name: AIM_CLIPASSWORDSDK_CMD - default: '/opt/CARKaim/sdk/clipasswordsdk' - appid: - description: Defines the unique ID of the application that is issuing the password request. - required: true - query: - description: Describes the filter criteria for the password retrieval. - required: true - output: - description: - - Specifies the desired output fields separated by commas. - - "They could be: Password, PassProps., PasswordChangeInProcess" - default: 'password' - _extra: - description: for extra_params values please check parameters for clipasswordsdk in CyberArk's "Credential Provider and ASCP Implementation Guide" - notes: - - For Ansible on Windows, please change the -parameters (-p, -d, and -o) to /parameters (/p, /d, and /o) and change the location of CLIPasswordSDK.exe. 
-''' - -EXAMPLES = """ - - name: passing options to the lookup - ansible.builtin.debug: - msg: '{{ lookup("community.general.cyberarkpassword", cyquery) }}' - vars: - cyquery: - appid: "app_ansible" - query: "safe=CyberArk_Passwords;folder=root;object=AdminPass" - output: "Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess" - - - - name: used in a loop - ansible.builtin.debug: - msg: "{{item}}" - with_community.general.cyberarkpassword: - appid: 'app_ansible' - query: 'safe=CyberArk_Passwords;folder=root;object=AdminPass' - output: 'Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess' + - Specifies the desired output fields separated by commas. + - 'They could be: Password, PassProps., PasswordChangeInProcess.' + type: string + default: 'password' + _extra: + description: For extra_params values please check parameters for clipasswordsdk in CyberArk's "Credential Provider and + ASCP Implementation Guide". +notes: + - For Ansible on Windows, please change the -parameters (C(-p), C(-d), and C(-o)) to /parameters (C(/p), C(/d), and C(/o)) + and change the location of C(CLIPasswordSDK.exe). """ -RETURN = """ +EXAMPLES = r""" +- name: passing options to the lookup + ansible.builtin.debug: + msg: '{{ lookup("community.general.cyberarkpassword", cyquery) }}' + vars: + cyquery: + appid: "app_ansible" + query: "safe=CyberArk_Passwords;folder=root;object=AdminPass" + output: "Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess" + + +- name: used in a loop + ansible.builtin.debug: + msg: "{{item}}" + with_community.general.cyberarkpassword: + appid: 'app_ansible' + query: 'safe=CyberArk_Passwords;folder=root;object=AdminPass' + output: 'Password,PassProps.UserName,PassProps.Address,PasswordChangeInProcess' +""" + +RETURN = r""" _result: description: A list containing one dictionary. type: list @@ -65,12 +69,12 @@ _result: contains: password: description: - - The actual value stored + - The actual value stored. 
passprops: - description: properties assigned to the entry + description: Properties assigned to the entry. type: dictionary passwordchangeinprocess: - description: did the password change? + description: Did the password change? """ import os @@ -80,8 +84,7 @@ from subprocess import Popen from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase -from ansible.parsing.splitter import parse_kv -from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native +from ansible.module_utils.common.text.converters import to_bytes, to_native from ansible.utils.display import Display display = Display() @@ -102,7 +105,7 @@ class CyberarkPassword: self.extra_parms = [] for key, value in kwargs.items(): self.extra_parms.append('-p') - self.extra_parms.append("%s=%s" % (key, value)) + self.extra_parms.append(f"{key}={value}") if self.appid is None: raise AnsibleError("CyberArk Error: No Application ID specified") @@ -127,8 +130,8 @@ class CyberarkPassword: all_parms = [ CLIPASSWORDSDK_CMD, 'GetPassword', - '-p', 'AppDescs.AppID=%s' % self.appid, - '-p', 'Query=%s' % self.query, + '-p', f'AppDescs.AppID={self.appid}', + '-p', f'Query={self.query}', '-o', self.output, '-d', self.b_delimiter] all_parms.extend(self.extra_parms) @@ -141,7 +144,7 @@ class CyberarkPassword: b_credential = to_bytes(tmp_output) if tmp_error: - raise AnsibleError("ERROR => %s " % (tmp_error)) + raise AnsibleError(f"ERROR => {tmp_error} ") if b_credential and b_credential.endswith(b'\n'): b_credential = b_credential[:-1] @@ -161,7 +164,7 @@ class CyberarkPassword: except subprocess.CalledProcessError as e: raise AnsibleError(e.output) except OSError as e: - raise AnsibleError("ERROR - AIM not installed or clipasswordsdk not in standard location. ERROR=(%s) => %s " % (to_text(e.errno), e.strerror)) + raise AnsibleError(f"ERROR - AIM not installed or clipasswordsdk not in standard location. 
ERROR=({e.errno}) => {e.strerror} ") return [result_dict] @@ -174,11 +177,11 @@ class LookupModule(LookupBase): """ def run(self, terms, variables=None, **kwargs): - display.vvvv("%s" % terms) + display.vvvv(f"{terms}") if isinstance(terms, list): return_values = [] for term in terms: - display.vvvv("Term: %s" % term) + display.vvvv(f"Term: {term}") cyberark_conn = CyberarkPassword(**term) return_values.append(cyberark_conn.get()) return return_values diff --git a/plugins/lookup/dependent.py b/plugins/lookup/dependent.py index 54714344eb..89502e9518 100644 --- a/plugins/lookup/dependent.py +++ b/plugins/lookup/dependent.py @@ -1,36 +1,33 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2015-2021, Felix Fontein # Copyright (c) 2018 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = """ +DOCUMENTATION = r""" name: dependent short_description: Composes a list with nested elements of other lists or dicts which can depend on previous loop variables author: Felix Fontein (@felixfontein) version_added: 3.1.0 description: - - "Takes the input lists and returns a list with elements that are lists, dictionaries, - or template expressions which evaluate to lists or dicts, composed of the elements of - the input evaluated lists and dictionaries." + - Takes the input lists and returns a list with elements that are lists, dictionaries, or template expressions which evaluate + to lists or dicts, composed of the elements of the input evaluated lists and dictionaries. options: _terms: description: - - A list where the elements are one-element dictionaries, mapping a name to a string, list, or dictionary. - The name is the index that is used in the result object. The value is iterated over as described below. 
+ - A list where the elements are one-element dictionaries, mapping a name to a string, list, or dictionary. The name + is the index that is used in the result object. The value is iterated over as described below. - If the value is a list, it is simply iterated over. - - If the value is a dictionary, it is iterated over and returned as if they would be processed by the - R(ansible.builtin.dict2items filter,ansible_collections.ansible.builtin.dict2items_filter). - - If the value is a string, it is evaluated as Jinja2 expressions which can access the previously chosen - elements with C(item.). The result must be a list or a dictionary. + - If the value is a dictionary, it is iterated over and returned as if they would be processed by the P(ansible.builtin.dict2items#filter) + filter. + - If the value is a string, it is evaluated as Jinja2 expressions which can access the previously chosen elements with + C(item.). The result must be a list or a dictionary. type: list elements: dict required: true """ -EXAMPLES = """ +EXAMPLES = r""" - name: Install/remove public keys for active admin users ansible.posix.authorized_key: user: "{{ item.admin.key }}" @@ -76,9 +73,9 @@ EXAMPLES = """ loop_control: # Makes the output readable, so that it doesn't contain the whole subdictionaries and lists label: |- - {{ [item.zone.key, item.prefix.key, item.entry.key, - item.entry.value.ttl | default(3600), - item.entry.value.absent | default(False), item.entry.value.value] }} + {{ [item.zone.key, item.prefix.key, item.entry.key, + item.entry.value.ttl | default(3600), + item.entry.value.absent | default(False), item.entry.value.value] }} with_community.general.dependent: - zone: dns_setup - prefix: item.zone.value @@ -89,51 +86,54 @@ EXAMPLES = """ '': A: value: - - 1.2.3.4 + - 1.2.3.4 AAAA: value: - - "2a01:1:2:3::1" + - "2a01:1:2:3::1" 'test._domainkey': TXT: ttl: 300 value: - - '"k=rsa; t=s; p=MIGfMA..."' + - '"k=rsa; t=s; p=MIGfMA..."' example.org: 'www': A: value: - - 1.2.3.4 - - 
5.6.7.8 + - 1.2.3.4 + - 5.6.7.8 """ -RETURN = """ - _list: - description: - - A list composed of dictionaries whose keys are the variable names from the input list. - type: list - elements: dict - sample: - - key1: a - key2: test - - key1: a - key2: foo - - key1: b - key2: bar +RETURN = r""" +_list: + description: + - A list composed of dictionaries whose keys are the variable names from the input list. + type: list + elements: dict + sample: + - key1: a + key2: test + - key1: a + key2: foo + - key1: b + key2: bar """ from ansible.errors import AnsibleLookupError -from ansible.module_utils.common._collections_compat import Mapping, Sequence -from ansible.module_utils.six import string_types +from collections.abc import Mapping, Sequence from ansible.plugins.lookup import LookupBase -from ansible.release import __version__ as ansible_version from ansible.template import Templar -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion +try: + from ansible.template import trust_as_template as _trust_as_template + HAS_DATATAGGING = True +except ImportError: + HAS_DATATAGGING = False -# Whether Templar has a cache, which can be controlled by Templar.template()'s cache option. -# The cache was removed for ansible-core 2.14 (https://github.com/ansible/ansible/pull/78419) -_TEMPLAR_HAS_TEMPLATE_CACHE = LooseVersion(ansible_version) < LooseVersion('2.14.0') +def _make_safe(value): + if HAS_DATATAGGING and isinstance(value, str): + return _trust_as_template(value) + return value class LookupModule(LookupBase): @@ -144,10 +144,11 @@ class LookupModule(LookupBase): ``variables`` are the variables to use. 
""" templar.available_variables = variables or {} - expression = "{0}{1}{2}".format("{{", expression, "}}") - if _TEMPLAR_HAS_TEMPLATE_CACHE: - return templar.template(expression, cache=False) - return templar.template(expression) + quoted_expression = "{0}{1}{2}".format("{{", expression, "}}") + if hasattr(templar, 'evaluate_expression'): + # This is available since the Data Tagging PR has been merged + return templar.evaluate_expression(_make_safe(expression)) + return templar.template(quoted_expression) def __process(self, result, terms, index, current, templar, variables): """Fills ``result`` list with evaluated items. @@ -173,12 +174,11 @@ class LookupModule(LookupBase): values = self.__evaluate(expression, templar, variables=vars) except Exception as e: raise AnsibleLookupError( - 'Caught "{error}" while evaluating {key!r} with item == {item!r}'.format( - error=e, key=key, item=current)) + f'Caught "{e}" while evaluating {key!r} with item == {current!r}') if isinstance(values, Mapping): for idx, val in sorted(values.items()): - current[key] = dict([('key', idx), ('value', val)]) + current[key] = dict(key=idx, value=val) self.__process(result, terms, index + 1, current, templar, variables) elif isinstance(values, Sequence): for elt in values: @@ -186,8 +186,7 @@ class LookupModule(LookupBase): self.__process(result, terms, index + 1, current, templar, variables) else: raise AnsibleLookupError( - 'Did not obtain dictionary or list while evaluating {key!r} with item == {item!r}, but {type}'.format( - key=key, item=current, type=type(values))) + f'Did not obtain dictionary or list while evaluating {key!r} with item == {current!r}, but {type(values)}') def run(self, terms, variables=None, **kwargs): """Generate list.""" @@ -195,30 +194,30 @@ class LookupModule(LookupBase): result = [] if len(terms) > 0: - templar = Templar(loader=self._templar._loader) + if HAS_DATATAGGING: + templar = self._templar.copy_with_new_env(available_variables={}) + else: + templar = 
Templar(loader=self._templar._loader) data = [] vars_so_far = set() for index, term in enumerate(terms): if not isinstance(term, Mapping): raise AnsibleLookupError( - 'Parameter {index} must be a dictionary, got {type}'.format( - index=index, type=type(term))) + f'Parameter {index} must be a dictionary, got {type(term)}') if len(term) != 1: raise AnsibleLookupError( - 'Parameter {index} must be a one-element dictionary, got {count} elements'.format( - index=index, count=len(term))) + f'Parameter {index} must be a one-element dictionary, got {len(term)} elements') k, v = list(term.items())[0] if k in vars_so_far: raise AnsibleLookupError( - 'The variable {key!r} appears more than once'.format(key=k)) + f'The variable {k!r} appears more than once') vars_so_far.add(k) - if isinstance(v, string_types): + if isinstance(v, str): data.append((k, v, None)) elif isinstance(v, (Sequence, Mapping)): data.append((k, None, v)) else: raise AnsibleLookupError( - 'Parameter {key!r} (index {index}) must have a value of type string, dictionary or list, got type {type}'.format( - index=index, key=k, type=type(v))) + f'Parameter {k!r} (index {index}) must have a value of type string, dictionary or list, got type {type(v)}') self.__process(result, data, 0, {}, templar, variables) return result diff --git a/plugins/lookup/dig.py b/plugins/lookup/dig.py index d0d94e988a..b36f02d7d4 100644 --- a/plugins/lookup/dig.py +++ b/plugins/lookup/dig.py @@ -1,83 +1,116 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2015, Jan-Piet Mens # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: dig - author: Jan-Piet Mens (@jpmens) - short_description: query DNS using the dnspython library - requirements: - - 
dnspython (python library, http://www.dnspython.org/) +DOCUMENTATION = r""" +name: dig +author: Jan-Piet Mens (@jpmens) +short_description: Query DNS using the dnspython library +requirements: + - dnspython (python library, http://www.dnspython.org/) +description: + - The dig lookup runs queries against DNS servers to retrieve DNS records for a specific name (FQDN - fully qualified domain + name). It is possible to lookup any DNS record in this manner. + - There is a couple of different syntaxes that can be used to specify what record should be retrieved, and for which name. + It is also possible to explicitly specify the DNS server(s) to use for lookups. + - In its simplest form, the dig lookup plugin can be used to retrieve an IPv4 address (DNS A record) associated with FQDN. + - In addition to (default) A record, it is also possible to specify a different record type that should be queried. This + can be done by either passing-in additional parameter of format qtype=TYPE to the dig lookup, or by appending /TYPE to + the FQDN being queried. + - If multiple values are associated with the requested record, the results are returned as a comma-separated list. In + such cases you may want to pass option C(wantlist=true) to the lookup call, or alternatively use C(query) instead of C(lookup), + which results in the record values being returned as a list over which you can iterate later on. + - By default, the lookup relies on system-wide configured DNS servers for performing the query. It is also possible to + explicitly specify DNS servers to query using the @DNS_SERVER_1,DNS_SERVER_2,...,DNS_SERVER_N notation. This needs to + be passed-in as an additional parameter to the lookup. +options: + _terms: + description: Domain(s) to query. + type: list + elements: str + qtype: description: - - The dig lookup runs queries against DNS servers to retrieve DNS records for a specific name (FQDN - fully qualified domain name). 
- It is possible to lookup any DNS record in this manner. - - There is a couple of different syntaxes that can be used to specify what record should be retrieved, and for which name. - It is also possible to explicitly specify the DNS server(s) to use for lookups. - - In its simplest form, the dig lookup plugin can be used to retrieve an IPv4 address (DNS A record) associated with FQDN - - In addition to (default) A record, it is also possible to specify a different record type that should be queried. - This can be done by either passing-in additional parameter of format qtype=TYPE to the dig lookup, or by appending /TYPE to the FQDN being queried. - - If multiple values are associated with the requested record, the results will be returned as a comma-separated list. - In such cases you may want to pass option I(wantlist=true) to the lookup call, or alternatively use C(query) instead of C(lookup), - which will result in the record values being returned as a list over which you can iterate later on. - - By default, the lookup will rely on system-wide configured DNS servers for performing the query. - It is also possible to explicitly specify DNS servers to query using the @DNS_SERVER_1,DNS_SERVER_2,...,DNS_SERVER_N notation. - This needs to be passed-in as an additional parameter to the lookup - options: - _terms: - description: Domain(s) to query. - type: list - elements: str - qtype: - description: - - Record type to query. - - C(DLV) has been removed in community.general 6.0.0. - - C(CAA) has been added in community.general 6.3.0. - type: str - default: 'A' - choices: [A, ALL, AAAA, CAA, CNAME, DNAME, DNSKEY, DS, HINFO, LOC, MX, NAPTR, NS, NSEC3PARAM, PTR, RP, RRSIG, SOA, SPF, SRV, SSHFP, TLSA, TXT] - flat: - description: If 0 each record is returned as a dictionary, otherwise a string. - type: int - default: 1 - retry_servfail: - description: Retry a nameserver if it returns SERVFAIL. 
- default: false - type: bool - version_added: 3.6.0 - fail_on_error: - description: - - Abort execution on lookup errors. - - The default for this option will likely change to C(true) in the future. - The current default, C(false), is used for backwards compatibility, and will result in empty strings - or the string C(NXDOMAIN) in the result in case of errors. - default: false - type: bool - version_added: 5.4.0 - real_empty: - description: - - Return empty result without empty strings, and return empty list instead of C(NXDOMAIN). - - The default for this option will likely change to C(true) in the future. - default: false - type: bool - version_added: 6.0.0 - class: - description: - - "Class." - type: str - default: 'IN' - notes: - - ALL is not a record per-se, merely the listed fields are available for any record results you retrieve in the form of a dictionary. - - While the 'dig' lookup plugin supports anything which dnspython supports out of the box, only a subset can be converted into a dictionary. - - If you need to obtain the AAAA record (IPv6 address), you must specify the record type explicitly. - Syntax for specifying the record type is shown in the examples below. - - The trailing dot in most of the examples listed is purely optional, but is specified for completeness/correctness sake. -''' + - Record type to query. + - V(DLV) has been removed in community.general 6.0.0. + - V(CAA) has been added in community.general 6.3.0. + type: str + default: 'A' + choices: + - A + - ALL + - AAAA + - CAA + - CNAME + - DNAME + - DNSKEY + - DS + - HINFO + - LOC + - MX + - NAPTR + - NS + - NSEC3PARAM + - PTR + - RP + - RRSIG + - SOA + - SPF + - SRV + - SSHFP + - TLSA + - TXT + flat: + description: If 0 each record is returned as a dictionary, otherwise a string. + type: int + default: 1 + retry_servfail: + description: Retry a nameserver if it returns SERVFAIL. 
+ default: false + type: bool + version_added: 3.6.0 + fail_on_error: + description: + - Abort execution on lookup errors. + - The default for this option is likely to change to V(true) in the future. The current default, V(false), is used for + backwards compatibility, and results in empty strings or the string V(NXDOMAIN) in the result in case of errors. + default: false + type: bool + version_added: 5.4.0 + real_empty: + description: + - Return empty result without empty strings, and return empty list instead of V(NXDOMAIN). + - The default for this option is likely to change to V(true) in the future. + - This option is forced to V(true) if multiple domains to be queried are specified. + default: false + type: bool + version_added: 6.0.0 + class: + description: + - Class. + type: str + default: 'IN' + tcp: + description: Use TCP to lookup DNS records. + default: false + type: bool + version_added: 7.5.0 + port: + description: Use port as target port when looking up DNS records. + default: 53 + type: int + version_added: 9.5.0 +notes: + - V(ALL) is not a record in itself, merely the listed fields are available for any record results you retrieve in the form + of a dictionary. + - While the plugin supports anything which C(dnspython) supports out of the box, only a subset can be converted into a dictionary. + - If you need to obtain the AAAA record (IPv6 address), you must specify the record type explicitly. Syntax for specifying + the record type is shown in the examples below. + - The trailing dot in most of the examples listed is purely optional, but is specified for completeness/correctness sake. 
+""" -EXAMPLES = """ +EXAMPLES = r""" - name: Simple A record (IPV4 address) lookup for example.com ansible.builtin.debug: msg: "{{ lookup('community.general.dig', 'example.com.')}}" @@ -95,6 +128,21 @@ EXAMPLES = """ msg: "MX record for gmail.com {{ item }}" with_items: "{{ lookup('community.general.dig', 'gmail.com./MX', wantlist=true) }}" +- name: Lookup multiple names at once + ansible.builtin.debug: + msg: "A record found {{ item }}" + loop: "{{ query('community.general.dig', 'example.org.', 'example.com.', 'gmail.com.') }}" + +- name: Lookup multiple names at once (from list variable) + ansible.builtin.debug: + msg: "A record found {{ item }}" + loop: "{{ query('community.general.dig', *hosts) }}" + vars: + hosts: + - example.org. + - example.com. + - gmail.com. + - ansible.builtin.debug: msg: "Reverse DNS for 192.0.2.5 is {{ lookup('community.general.dig', '192.0.2.5/PTR') }}" - ansible.builtin.debug: @@ -113,88 +161,87 @@ EXAMPLES = """ msg: "{{ lookup('community.general.dig', 'example.org./A', retry_servfail=true) }}" """ -RETURN = """ - _list: - description: - - List of composed strings or dictionaries with key and value - If a dictionary, fields shows the keys returned depending on query type - type: list - elements: raw - contains: - ALL: - description: - - owner, ttl, type - A: - description: - - address - AAAA: - description: - - address - CAA: - description: - - flags - - tag - - value - version_added: 6.3.0 - CNAME: - description: - - target - DNAME: - description: - - target - DNSKEY: - description: - - flags, algorithm, protocol, key - DS: - description: - - algorithm, digest_type, key_tag, digest - HINFO: - description: - - cpu, os - LOC: - description: - - latitude, longitude, altitude, size, horizontal_precision, vertical_precision - MX: - description: - - preference, exchange - NAPTR: - description: - - order, preference, flags, service, regexp, replacement - NS: - description: - - target - NSEC3PARAM: - description: - - algorithm, flags, 
iterations, salt - PTR: - description: - - target - RP: - description: - - mbox, txt - SOA: - description: - - mname, rname, serial, refresh, retry, expire, minimum - SPF: - description: - - strings - SRV: - description: - - priority, weight, port, target - SSHFP: - description: - - algorithm, fp_type, fingerprint - TLSA: - description: - - usage, selector, mtype, cert - TXT: - description: - - strings +RETURN = r""" +_list: + description: + - List of composed strings or of dictionaries, with fields depending + on query type. + type: list + elements: raw + contains: + ALL: + description: + - C(owner), C(ttl), C(type). + A: + description: + - C(address). + AAAA: + description: + - C(address). + CAA: + description: + - C(flags). + - C(tag). + - C(value). + version_added: 6.3.0 + CNAME: + description: + - C(target). + DNAME: + description: + - C(target). + DNSKEY: + description: + - C(flags), C(algorithm), C(protocol), C(key). + DS: + description: + - C(algorithm), C(digest_type), C(key_tag), C(digest). + HINFO: + description: + - C(cpu), C(os). + LOC: + description: + - C(latitude), C(longitude), C(altitude), C(size), C(horizontal_precision), C(vertical_precision). + MX: + description: + - C(preference), C(exchange). + NAPTR: + description: + - C(order), C(preference), C(flags), C(service), C(regexp), C(replacement). + NS: + description: + - C(target). + NSEC3PARAM: + description: + - C(algorithm), C(flags), C(iterations), C(salt). + PTR: + description: + - C(target). + RP: + description: + - C(mbox), C(txt). + SOA: + description: + - C(mname), C(rname), C(serial), C(refresh), C(retry), C(expire), C(minimum). + SPF: + description: + - C(strings). + SRV: + description: + - C(priority), C(weight), C(port), C(target). + SSHFP: + description: + - C(algorithm), C(fp_type), C(fingerprint). + TLSA: + description: + - C(usage), C(selector), C(mtype), C(cert). + TXT: + description: + - C(strings). 
""" from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase -from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.parsing.convert_bool import boolean from ansible.utils.display import Display import socket @@ -308,22 +355,24 @@ class LookupModule(LookupBase): edns_size = 4096 myres.use_edns(0, ednsflags=dns.flags.DO, payload=edns_size) - domain = None + domains = [] + nameservers = [] qtype = self.get_option('qtype') flat = self.get_option('flat') fail_on_error = self.get_option('fail_on_error') real_empty = self.get_option('real_empty') + tcp = self.get_option('tcp') + port = self.get_option('port') try: rdclass = dns.rdataclass.from_text(self.get_option('class')) except Exception as e: - raise AnsibleError("dns lookup illegal CLASS: %s" % to_native(e)) + raise AnsibleError(f"dns lookup illegal CLASS: {e}") myres.retry_servfail = self.get_option('retry_servfail') for t in terms: if t.startswith('@'): # e.g. "@10.0.1.2,192.0.2.1" is ok. nsset = t[1:].split(',') for ns in nsset: - nameservers = [] # Check if we have a valid IP address. If so, use that, otherwise # try to resolve name to address using system's resolver. If that # fails we bail out. 
@@ -335,8 +384,7 @@ class LookupModule(LookupBase): nsaddr = dns.resolver.query(ns)[0].address nameservers.append(nsaddr) except Exception as e: - raise AnsibleError("dns lookup NS: %s" % to_native(e)) - myres.nameservers = nameservers + raise AnsibleError(f"dns lookup NS: {e}") continue if '=' in t: try: @@ -352,76 +400,86 @@ class LookupModule(LookupBase): try: rdclass = dns.rdataclass.from_text(arg) except Exception as e: - raise AnsibleError("dns lookup illegal CLASS: %s" % to_native(e)) + raise AnsibleError(f"dns lookup illegal CLASS: {e}") elif opt == 'retry_servfail': myres.retry_servfail = boolean(arg) elif opt == 'fail_on_error': fail_on_error = boolean(arg) elif opt == 'real_empty': real_empty = boolean(arg) + elif opt == 'tcp': + tcp = boolean(arg) continue if '/' in t: try: domain, qtype = t.split('/') + domains.append(domain) except Exception: - domain = t + domains.append(t) else: - domain = t + domains.append(t) - # print "--- domain = {0} qtype={1} rdclass={2}".format(domain, qtype, rdclass) + # print "--- domain = {domain} qtype={qtype} rdclass={rdclass}" + + if port: + myres.port = port + if len(nameservers) > 0: + myres.nameservers = nameservers + + if qtype.upper() == 'PTR': + reversed_domains = [] + for domain in domains: + try: + n = dns.reversename.from_address(domain) + reversed_domains.append(n.to_text()) + except dns.exception.SyntaxError: + pass + except Exception as e: + raise AnsibleError(f"dns.reversename unhandled exception {e}") + domains = reversed_domains + + if len(domains) > 1: + real_empty = True ret = [] - if qtype.upper() == 'PTR': + for domain in domains: try: - n = dns.reversename.from_address(domain) - domain = n.to_text() - except dns.exception.SyntaxError: - pass - except Exception as e: - raise AnsibleError("dns.reversename unhandled exception %s" % to_native(e)) + answers = myres.query(domain, qtype, rdclass=rdclass, tcp=tcp) + for rdata in answers: + s = rdata.to_text() + if qtype.upper() == 'TXT': + s = s[1:-1] # 
Strip outside quotes on TXT rdata - try: - answers = myres.query(domain, qtype, rdclass=rdclass) - for rdata in answers: - s = rdata.to_text() - if qtype.upper() == 'TXT': - s = s[1:-1] # Strip outside quotes on TXT rdata + if flat: + ret.append(s) + else: + try: + rd = make_rdata_dict(rdata) + rd['owner'] = answers.canonical_name.to_text() + rd['type'] = dns.rdatatype.to_text(rdata.rdtype) + rd['ttl'] = answers.rrset.ttl + rd['class'] = dns.rdataclass.to_text(rdata.rdclass) - if flat: - ret.append(s) - else: - try: - rd = make_rdata_dict(rdata) - rd['owner'] = answers.canonical_name.to_text() - rd['type'] = dns.rdatatype.to_text(rdata.rdtype) - rd['ttl'] = answers.rrset.ttl - rd['class'] = dns.rdataclass.to_text(rdata.rdclass) + ret.append(rd) + except Exception as err: + if fail_on_error: + raise AnsibleError(f"Lookup failed: {err}") + ret.append(str(err)) - ret.append(rd) - except Exception as err: - if fail_on_error: - raise AnsibleError("Lookup failed: %s" % str(err)) - ret.append(str(err)) - - except dns.resolver.NXDOMAIN as err: - if fail_on_error: - raise AnsibleError("Lookup failed: %s" % str(err)) - if not real_empty: - ret.append('NXDOMAIN') - except dns.resolver.NoAnswer as err: - if fail_on_error: - raise AnsibleError("Lookup failed: %s" % str(err)) - if not real_empty: - ret.append("") - except dns.resolver.Timeout as err: - if fail_on_error: - raise AnsibleError("Lookup failed: %s" % str(err)) - if not real_empty: - ret.append("") - except dns.exception.DNSException as err: - raise AnsibleError("dns.resolver unhandled exception %s" % to_native(err)) + except dns.resolver.NXDOMAIN as err: + if fail_on_error: + raise AnsibleError(f"Lookup failed: {err}") + if not real_empty: + ret.append('NXDOMAIN') + except (dns.resolver.NoAnswer, dns.resolver.Timeout, dns.resolver.NoNameservers) as err: + if fail_on_error: + raise AnsibleError(f"Lookup failed: {err}") + if not real_empty: + ret.append("") + except dns.exception.DNSException as err: + raise 
AnsibleError(f"dns.resolver unhandled exception {err}") return ret diff --git a/plugins/lookup/dnstxt.py b/plugins/lookup/dnstxt.py index 55067dc82b..d83f08bb09 100644 --- a/plugins/lookup/dnstxt.py +++ b/plugins/lookup/dnstxt.py @@ -1,35 +1,33 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2012, Jan-Piet Mens # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: dnstxt - author: Jan-Piet Mens (@jpmens) - short_description: query a domain(s)'s DNS txt fields - requirements: - - dns/dns.resolver (python library) +DOCUMENTATION = r""" +name: dnstxt +author: Jan-Piet Mens (@jpmens) +short_description: Query a domain(s)'s DNS txt fields +requirements: + - dns/dns.resolver (python library) +description: + - Uses a python library to return the DNS TXT record for a domain. +options: + _terms: + description: Domain or list of domains to query TXT records from. + required: true + type: list + elements: string + real_empty: description: - - Uses a python library to return the DNS TXT record for a domain. - options: - _terms: - description: domain or list of domains to query TXT records from - required: true - type: list - elements: string - real_empty: - description: - - Return empty result without empty strings, and return empty list instead of C(NXDOMAIN). - - The default for this option will likely change to C(true) in the future. - default: false - type: bool - version_added: 6.0.0 -''' + - Return empty result without empty strings, and return empty list instead of V(NXDOMAIN). + - The default for this option is likely to change to V(true) in the future. 
+ default: false + type: bool + version_added: 6.0.0 +""" -EXAMPLES = """ +EXAMPLES = r""" - name: show txt entry ansible.builtin.debug: msg: "{{lookup('community.general.dnstxt', ['test.example.com'])}}" @@ -48,11 +46,11 @@ EXAMPLES = """ with_community.general.dnstxt: "{{lookup('community.general.dnstxt', ['test.example.com']).split(',')}}" """ -RETURN = """ - _list: - description: - - values returned by the DNS TXT record. - type: list +RETURN = r""" +_list: + description: + - Values returned by the DNS TXT record. + type: list """ HAVE_DNS = False @@ -64,7 +62,6 @@ except ImportError: pass from ansible.errors import AnsibleError -from ansible.module_utils.common.text.converters import to_native from ansible.plugins.lookup import LookupBase # ============================================================== @@ -108,7 +105,7 @@ class LookupModule(LookupBase): continue string = '' except DNSException as e: - raise AnsibleError("dns.resolver unhandled exception %s" % to_native(e)) + raise AnsibleError(f"dns.resolver unhandled exception {e}") ret.append(''.join(string)) diff --git a/plugins/lookup/dsv.py b/plugins/lookup/dsv.py index 91a9d99212..594dd40f4e 100644 --- a/plugins/lookup/dsv.py +++ b/plugins/lookup/dsv.py @@ -1,10 +1,8 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2020, Adam Migus # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type DOCUMENTATION = r""" name: dsv @@ -12,76 +10,78 @@ author: Adam Migus (@amigus) short_description: Get secrets from Thycotic DevOps Secrets Vault version_added: 1.0.0 description: - - Uses the Thycotic DevOps Secrets Vault Python SDK to get Secrets from a - DSV I(tenant) using a I(client_id) and I(client_secret). 
+ - Uses the Thycotic DevOps Secrets Vault Python SDK to get Secrets from a DSV O(tenant) using a O(client_id) and O(client_secret). requirements: - - python-dsv-sdk - https://pypi.org/project/python-dsv-sdk/ + - python-dsv-sdk - https://pypi.org/project/python-dsv-sdk/ options: - _terms: - description: The path to the secret, e.g. C(/staging/servers/web1). - required: true - tenant: - description: The first format parameter in the default I(url_template). - env: - - name: DSV_TENANT - ini: - - section: dsv_lookup - key: tenant - required: true - tld: - default: com - description: The top-level domain of the tenant; the second format - parameter in the default I(url_template). - env: - - name: DSV_TLD - ini: - - section: dsv_lookup - key: tld - required: false - client_id: - description: The client_id with which to request the Access Grant. - env: - - name: DSV_CLIENT_ID - ini: - - section: dsv_lookup - key: client_id - required: true - client_secret: - description: The client secret associated with the specific I(client_id). - env: - - name: DSV_CLIENT_SECRET - ini: - - section: dsv_lookup - key: client_secret - required: true - url_template: - default: https://{}.secretsvaultcloud.{}/v1 - description: The path to prepend to the base URL to form a valid REST - API request. - env: - - name: DSV_URL_TEMPLATE - ini: - - section: dsv_lookup - key: url_template - required: false + _terms: + description: The path to the secret, for example V(/staging/servers/web1). + required: true + tenant: + description: The first format parameter in the default O(url_template). + type: string + env: + - name: DSV_TENANT + ini: + - section: dsv_lookup + key: tenant + required: true + tld: + default: com + description: The top-level domain of the tenant; the second format parameter in the default O(url_template). 
+ type: string + env: + - name: DSV_TLD + ini: + - section: dsv_lookup + key: tld + required: false + client_id: + description: The client_id with which to request the Access Grant. + type: string + env: + - name: DSV_CLIENT_ID + ini: + - section: dsv_lookup + key: client_id + required: true + client_secret: + description: The client secret associated with the specific O(client_id). + type: string + env: + - name: DSV_CLIENT_SECRET + ini: + - section: dsv_lookup + key: client_secret + required: true + url_template: + default: https://{}.secretsvaultcloud.{}/v1 + description: The path to prepend to the base URL to form a valid REST API request. + type: string + env: + - name: DSV_URL_TEMPLATE + ini: + - section: dsv_lookup + key: url_template + required: false """ RETURN = r""" _list: - description: - - One or more JSON responses to C(GET /secrets/{path}). - - See U(https://dsv.thycotic.com/api/index.html#operation/getSecret). - type: list - elements: dict + description: + - One or more JSON responses to C(GET /secrets/{path}). + - See U(https://dsv.thycotic.com/api/index.html#operation/getSecret). 
+ type: list + elements: dict """ EXAMPLES = r""" - hosts: localhost vars: - secret: "{{ lookup('community.general.dsv', '/test/secret') }}" + secret: "{{ lookup('community.general.dsv', '/test/secret') }}" tasks: - - ansible.builtin.debug: - msg: 'the password is {{ secret["data"]["password"] }}' + - ansible.builtin.debug: + msg: 'the password is {{ secret["data"]["password"] }}' """ from ansible.errors import AnsibleError, AnsibleOptionsError @@ -130,17 +130,17 @@ class LookupModule(LookupBase): result = [] for term in terms: - display.debug("dsv_lookup term: %s" % term) + display.debug(f"dsv_lookup term: {term}") try: path = term.lstrip("[/:]") if path == "": - raise AnsibleOptionsError("Invalid secret path: %s" % term) + raise AnsibleOptionsError(f"Invalid secret path: {term}") - display.vvv(u"DevOps Secrets Vault GET /secrets/%s" % path) + display.vvv(f"DevOps Secrets Vault GET /secrets/{path}") result.append(vault.get_secret_json(path)) except SecretsVaultError as error: raise AnsibleError( - "DevOps Secrets Vault lookup failure: %s" % error.message + f"DevOps Secrets Vault lookup failure: {error.message}" ) return result diff --git a/plugins/lookup/etcd.py b/plugins/lookup/etcd.py index d6a12293e3..65a9d23d2f 100644 --- a/plugins/lookup/etcd.py +++ b/plugins/lookup/etcd.py @@ -1,47 +1,51 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2013, Jan-Piet Mens # (m) 2016, Mihai Moldovanu # (m) 2017, Juan Manuel Parrilla # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: - - Jan-Piet Mens (@jpmens) - name: etcd - short_description: get info from an etcd server +DOCUMENTATION = r""" +author: + - Jan-Piet Mens (@jpmens) +name: etcd +short_description: Get info from an etcd server +description: + - Retrieves 
data from an etcd server. +options: + _terms: description: - - Retrieves data from an etcd server - options: - _terms: - description: - - the list of keys to lookup on the etcd server - type: list - elements: string - required: true - url: - description: - - Environment variable with the url for the etcd server - default: 'http://127.0.0.1:4001' - env: - - name: ANSIBLE_ETCD_URL - version: - description: - - Environment variable with the etcd protocol version - default: 'v1' - env: - - name: ANSIBLE_ETCD_VERSION - validate_certs: - description: - - toggle checking that the ssl certificates are valid, you normally only want to turn this off with self-signed certs. - default: true - type: boolean -''' + - The list of keys to lookup on the etcd server. + type: list + elements: string + required: true + url: + description: + - Environment variable with the URL for the etcd server. + type: string + default: 'http://127.0.0.1:4001' + env: + - name: ANSIBLE_ETCD_URL + version: + description: + - Environment variable with the etcd protocol version. + type: string + default: 'v1' + env: + - name: ANSIBLE_ETCD_VERSION + validate_certs: + description: + - Toggle checking that the ssl certificates are valid, you normally only want to turn this off with self-signed certs. 
+ default: true + type: boolean +seealso: + - module: community.general.etcd3 + - plugin: community.general.etcd3 + plugin_type: lookup +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: "a value from a locally running etcd" ansible.builtin.debug: msg: "{{ lookup('community.general.etcd', 'foo/bar') }}" @@ -50,18 +54,18 @@ EXAMPLES = ''' ansible.builtin.debug: msg: "{{ lookup('community.general.etcd', 'foo', 'bar', 'baz') }}" -- name: "since Ansible 2.5 you can set server options inline" +- name: "you can set server options inline" ansible.builtin.debug: msg: "{{ lookup('community.general.etcd', 'foo', version='v2', url='http://192.168.0.27:4001') }}" -''' +""" -RETURN = ''' - _raw: - description: - - list of values associated with input keys - type: list - elements: string -''' +RETURN = r""" +_raw: + description: + - List of values associated with input keys. + type: list + elements: string +""" import json @@ -98,7 +102,7 @@ class Etcd: def __init__(self, url, version, validate_certs): self.url = url self.version = version - self.baseurl = '%s/%s/keys' % (self.url, self.version) + self.baseurl = f'{self.url}/{self.version}/keys' self.validate_certs = validate_certs def _parse_node(self, node): @@ -119,7 +123,7 @@ class Etcd: return path def get(self, key): - url = "%s/%s?recursive=true" % (self.baseurl, key) + url = f"{self.baseurl}/{key}?recursive=true" data = None value = {} try: diff --git a/plugins/lookup/etcd3.py b/plugins/lookup/etcd3.py index df41d791e8..0312f17127 100644 --- a/plugins/lookup/etcd3.py +++ b/plugins/lookup/etcd3.py @@ -1,107 +1,105 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2020, SCC France, Eric Belhomme # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: - - Eric Belhomme 
(@eric-belhomme) - version_added: '0.2.0' - name: etcd3 - short_description: Get key values from etcd3 server +DOCUMENTATION = r""" +author: + - Eric Belhomme (@eric-belhomme) +version_added: '0.2.0' +name: etcd3 +short_description: Get key values from etcd3 server +description: + - Retrieves key values and/or key prefixes from etcd3 server using its native gRPC API. + - Try to reuse M(community.general.etcd3) options for connection parameters, but add support for some E(ETCDCTL_*) environment + variables. + - See U(https://github.com/etcd-io/etcd/tree/master/Documentation/op-guide) for etcd overview. +options: + _terms: description: - - Retrieves key values and/or key prefixes from etcd3 server using its native gRPC API. - - Try to reuse M(community.general.etcd3) options for connection parameters, but add support for some C(ETCDCTL_*) environment variables. - - See U(https://github.com/etcd-io/etcd/tree/master/Documentation/op-guide) for etcd overview. + - The list of keys (or key prefixes) to look up on the etcd3 server. + type: list + elements: str + required: true + prefix: + description: + - Look for key or prefix key. + type: bool + default: false + endpoints: + description: + - Counterpart of E(ETCDCTL_ENDPOINTS) environment variable. Specify the etcd3 connection with an URL form, for example + V(https://hostname:2379), or V(:) form. + - The V(host) part is overwritten by O(host) option, if defined. + - The V(port) part is overwritten by O(port) option, if defined. + env: + - name: ETCDCTL_ENDPOINTS + default: '127.0.0.1:2379' + type: str + host: + description: + - Etcd3 listening client host. + - Takes precedence over O(endpoints). + type: str + port: + description: + - Etcd3 listening client port. + - Takes precedence over O(endpoints). + type: int + ca_cert: + description: + - Etcd3 CA authority. + env: + - name: ETCDCTL_CACERT + type: str + cert_cert: + description: + - Etcd3 client certificate. 
+ env: + - name: ETCDCTL_CERT + type: str + cert_key: + description: + - Etcd3 client private key. + env: + - name: ETCDCTL_KEY + type: str + timeout: + description: + - Client timeout. + default: 60 + env: + - name: ETCDCTL_DIAL_TIMEOUT + type: int + user: + description: + - Authenticated user name. + env: + - name: ETCDCTL_USER + type: str + password: + description: + - Authenticated user password. + env: + - name: ETCDCTL_PASSWORD + type: str - options: - _terms: - description: - - The list of keys (or key prefixes) to look up on the etcd3 server. - type: list - elements: str - required: true - prefix: - description: - - Look for key or prefix key. - type: bool - default: false - endpoints: - description: - - Counterpart of C(ETCDCTL_ENDPOINTS) environment variable. - Specify the etcd3 connection with and URL form eg. C(https://hostname:2379) or C(:) form. - - The C(host) part is overwritten by I(host) option, if defined. - - The C(port) part is overwritten by I(port) option, if defined. - env: - - name: ETCDCTL_ENDPOINTS - default: '127.0.0.1:2379' - type: str - host: - description: - - etcd3 listening client host. - - Takes precedence over I(endpoints). - type: str - port: - description: - - etcd3 listening client port. - - Takes precedence over I(endpoints). - type: int - ca_cert: - description: - - etcd3 CA authority. - env: - - name: ETCDCTL_CACERT - type: str - cert_cert: - description: - - etcd3 client certificate. - env: - - name: ETCDCTL_CERT - type: str - cert_key: - description: - - etcd3 client private key. - env: - - name: ETCDCTL_KEY - type: str - timeout: - description: - - Client timeout. - default: 60 - env: - - name: ETCDCTL_DIAL_TIMEOUT - type: int - user: - description: - - Authenticated user name. - env: - - name: ETCDCTL_USER - type: str - password: - description: - - Authenticated user password. - env: - - name: ETCDCTL_PASSWORD - type: str +notes: + - O(host) and O(port) options take precedence over (endpoints) option. 
+ - The recommended way to connect to etcd3 server is using E(ETCDCTL_ENDPOINT) environment variable and keep O(endpoints), + O(host), and O(port) unused. +seealso: + - module: community.general.etcd3 + - plugin: community.general.etcd + plugin_type: lookup - notes: - - I(host) and I(port) options take precedence over (endpoints) option. - - The recommended way to connect to etcd3 server is using C(ETCDCTL_ENDPOINT) - environment variable and keep I(endpoints), I(host), and I(port) unused. - seealso: - - module: community.general.etcd3 - - ref: ansible_collections.community.general.etcd_lookup - description: The etcd v2 lookup. +requirements: + - "etcd3 >= 0.10" +""" - requirements: - - "etcd3 >= 0.10" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: "a value from a locally running etcd" ansible.builtin.debug: msg: "{{ lookup('community.general.etcd3', 'foo/bar') }}" @@ -117,31 +115,30 @@ EXAMPLES = ''' - name: "connect to etcd3 with a client certificate" ansible.builtin.debug: msg: "{{ lookup('community.general.etcd3', 'foo/bar', cert_cert='/etc/ssl/etcd/client.pem', cert_key='/etc/ssl/etcd/client.key') }}" -''' +""" -RETURN = ''' - _raw: - description: - - List of keys and associated values. - type: list - elements: dict - contains: - key: - description: The element's key. - type: str - value: - description: The element's value. - type: str -''' +RETURN = r""" +_raw: + description: + - List of keys and associated values. + type: list + elements: dict + contains: + key: + description: The element's key. + type: str + value: + description: The element's value. 
+ type: str +""" import re -from ansible.plugins.lookup import LookupBase -from ansible.utils.display import Display +from ansible.errors import AnsibleLookupError from ansible.module_utils.basic import missing_required_lib from ansible.module_utils.common.text.converters import to_native from ansible.plugins.lookup import LookupBase -from ansible.errors import AnsibleError, AnsibleLookupError +from ansible.utils.display import Display try: import etcd3 @@ -169,7 +166,7 @@ def etcd3_client(client_params): etcd = etcd3.client(**client_params) etcd.status() except Exception as exp: - raise AnsibleLookupError('Cannot connect to etcd cluster: %s' % (to_native(exp))) + raise AnsibleLookupError(f'Cannot connect to etcd cluster: {exp}') return etcd @@ -205,7 +202,7 @@ class LookupModule(LookupBase): cnx_log = dict(client_params) if 'password' in cnx_log: cnx_log['password'] = '' - display.verbose("etcd3 connection parameters: %s" % cnx_log) + display.verbose(f"etcd3 connection parameters: {cnx_log}") # connect to etcd3 server etcd = etcd3_client(client_params) @@ -219,12 +216,12 @@ class LookupModule(LookupBase): if val and meta: ret.append({'key': to_native(meta.key), 'value': to_native(val)}) except Exception as exp: - display.warning('Caught except during etcd3.get_prefix: %s' % (to_native(exp))) + display.warning(f'Caught except during etcd3.get_prefix: {exp}') else: try: val, meta = etcd.get(term) if val and meta: ret.append({'key': to_native(meta.key), 'value': to_native(val)}) except Exception as exp: - display.warning('Caught except during etcd3.get: %s' % (to_native(exp))) + display.warning(f'Caught except during etcd3.get: {exp}') return ret diff --git a/plugins/lookup/filetree.py b/plugins/lookup/filetree.py index f12cc45192..49326edb87 100644 --- a/plugins/lookup/filetree.py +++ b/plugins/lookup/filetree.py @@ -1,25 +1,26 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2016 Dag Wieers # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see 
LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" name: filetree author: Dag Wieers (@dagwieers) -short_description: recursively match all files in a directory tree +short_description: Recursively match all files in a directory tree description: -- This lookup enables you to template a complete tree of files on a target system while retaining permissions and ownership. -- Supports directories, files and symlinks, including SELinux and other file properties. -- If you provide more than one path, it will implement a first_found logic, and will not process entries it already processed in previous paths. - This enables merging different trees in order of importance, or add role_vars to specific paths to influence different instances of the same role. + - This lookup enables you to template a complete tree of files on a target system while retaining permissions and ownership. + - Supports directories, files and symlinks, including SELinux and other file properties. + - If you provide more than one path, it implements a first_found logic, and does not process entries it already processed + in previous paths. This enables merging different trees in order of importance, or add role_vars to specific paths to + influence different instances of the same role. options: _terms: - description: path(s) of files to read + description: Path(s) of files to read. required: true -''' + type: list + elements: string +""" EXAMPLES = r""" - name: Create directories @@ -57,61 +58,61 @@ EXAMPLES = r""" """ RETURN = r""" - _raw: - description: List of dictionaries with file information. - type: list - elements: dict - contains: - src: - description: - - Full path to file. - - Not returned when I(item.state) is set to C(directory). 
- type: path - root: - description: Allows filtering by original location. - type: path - path: - description: Contains the relative path to root. - type: path - mode: - description: The permissions the resulting file or directory. - type: str - state: - description: TODO - type: str - owner: - description: Name of the user that owns the file/directory. - type: raw - group: - description: Name of the group that owns the file/directory. - type: raw - seuser: - description: The user part of the SELinux file context. - type: raw - serole: - description: The role part of the SELinux file context. - type: raw - setype: - description: The type part of the SELinux file context. - type: raw - selevel: - description: The level part of the SELinux file context. - type: raw - uid: - description: Owner ID of the file/directory. - type: int - gid: - description: Group ID of the file/directory. - type: int - size: - description: Size of the target. - type: int - mtime: - description: Time of last modification. - type: float - ctime: - description: Time of last metadata update or creation (depends on OS). - type: float +_raw: + description: List of dictionaries with file information. + type: list + elements: dict + contains: + src: + description: + - Full path to file. + - Not returned when RV(_raw[].state) is set to V(directory). + type: path + root: + description: Allows filtering by original location. + type: path + path: + description: Contains the relative path to root. + type: path + mode: + description: The permissions the resulting file or directory. + type: str + state: + description: TODO. + type: str + owner: + description: Name of the user that owns the file/directory. + type: raw + group: + description: Name of the group that owns the file/directory. + type: raw + seuser: + description: The user part of the SELinux file context. + type: raw + serole: + description: The role part of the SELinux file context. 
+ type: raw + setype: + description: The type part of the SELinux file context. + type: raw + selevel: + description: The level part of the SELinux file context. + type: raw + uid: + description: Owner ID of the file/directory. + type: int + gid: + description: Group ID of the file/directory. + type: int + size: + description: Size of the target. + type: int + mtime: + description: Time of last modification. + type: float + ctime: + description: Time of last metadata update or creation (depends on OS). + type: float """ import os import pwd @@ -156,7 +157,7 @@ def file_props(root, path): try: st = os.lstat(abspath) except OSError as e: - display.warning('filetree: Error using stat() on path %s (%s)' % (abspath, e)) + display.warning(f'filetree: Error using stat() on path {abspath} ({e})') return None ret = dict(root=root, path=path) @@ -170,7 +171,7 @@ def file_props(root, path): ret['state'] = 'file' ret['src'] = abspath else: - display.warning('filetree: Error file type of %s is not supported' % abspath) + display.warning(f'filetree: Error file type of {abspath} is not supported') return None ret['uid'] = st.st_uid @@ -183,7 +184,7 @@ def file_props(root, path): ret['group'] = to_text(grp.getgrgid(st.st_gid).gr_name) except KeyError: ret['group'] = st.st_gid - ret['mode'] = '0%03o' % (stat.S_IMODE(st.st_mode)) + ret['mode'] = f'0{stat.S_IMODE(st.st_mode):03o}' ret['size'] = st.st_size ret['mtime'] = st.st_mtime ret['ctime'] = st.st_ctime @@ -210,7 +211,7 @@ class LookupModule(LookupBase): term_file = os.path.basename(term) dwimmed_path = self._loader.path_dwim_relative(basedir, 'files', os.path.dirname(term)) path = os.path.join(dwimmed_path, term_file) - display.debug("Walking '{0}'".format(path)) + display.debug(f"Walking '{path}'") for root, dirs, files in os.walk(path, topdown=True): for entry in dirs + files: relpath = os.path.relpath(os.path.join(root, entry), path) @@ -219,7 +220,7 @@ class LookupModule(LookupBase): if relpath not in [entry['path'] for 
entry in ret]: props = file_props(path, relpath) if props is not None: - display.debug(" found '{0}'".format(os.path.join(path, relpath))) + display.debug(f" found '{os.path.join(path, relpath)}'") ret.append(props) return ret diff --git a/plugins/lookup/flattened.py b/plugins/lookup/flattened.py index 0f290e559d..0ed92afa27 100644 --- a/plugins/lookup/flattened.py +++ b/plugins/lookup/flattened.py @@ -1,43 +1,40 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2013, Serge van Ginderachter # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: flattened - author: Serge van Ginderachter (!UNKNOWN) - short_description: return single list completely flattened - description: - - Given one or more lists, this lookup will flatten any list elements found recursively until only 1 list is left. - options: - _terms: - description: lists to flatten - type: list - elements: raw - required: true - notes: - - Unlike the R(items lookup,ansible_collections.ansible.builtin.items_lookup) which only flattens 1 level, - this plugin will continue to flatten until it cannot find lists anymore. - - Aka highlander plugin, there can only be one (list). -''' +DOCUMENTATION = r""" +name: flattened +author: Serge van Ginderachter (!UNKNOWN) +short_description: Return single list completely flattened +description: + - Given one or more lists, this lookup flattens any list elements found recursively until only 1 list is left. +options: + _terms: + description: Lists to flatten. + type: list + elements: raw + required: true +notes: + - Unlike the P(ansible.builtin.items#lookup) lookup which only flattens 1 level, this plugin continues to flatten until + it cannot find lists anymore. 
+ - Aka highlander plugin, there can only be one (list). +""" -EXAMPLES = """ +EXAMPLES = r""" - name: "'unnest' all elements into single list" ansible.builtin.debug: msg: "all in one list {{lookup('community.general.flattened', [1,2,3,[5,6]], ['a','b','c'], [[5,6,1,3], [34,'a','b','c']])}}" """ -RETURN = """ - _raw: - description: - - flattened list - type: list +RETURN = r""" +_raw: + description: + - Flattened list. + type: list """ from ansible.errors import AnsibleError -from ansible.module_utils.six import string_types from ansible.plugins.lookup import LookupBase from ansible.utils.listify import listify_lookup_plugin_terms @@ -65,15 +62,15 @@ class LookupModule(LookupBase): # ignore undefined items break - if isinstance(term, string_types): + if isinstance(term, str): # convert a variable to a list - term2 = listify_lookup_plugin_terms(term, templar=self._templar, loader=self._loader) + term2 = listify_lookup_plugin_terms(term, templar=self._templar) # but avoid converting a plain string to a list of one string if term2 != [term]: term = term2 if isinstance(term, list): - # if it's a list, check recursively for items that are a list + # if it is a list, check recursively for items that are a list term = self._do_flatten(term, variables) ret.extend(term) else: diff --git a/plugins/lookup/github_app_access_token.py b/plugins/lookup/github_app_access_token.py new file mode 100644 index 0000000000..0b4f4d53ee --- /dev/null +++ b/plugins/lookup/github_app_access_token.py @@ -0,0 +1,223 @@ +# Copyright (c) 2023, Poh Wei Sheng +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +name: github_app_access_token +author: + - Poh Wei Sheng (@weisheng-p) + - Bruno Lavoie (@blavoie) +short_description: Obtain short-lived Github App Access tokens +version_added: '8.2.0' +requirements: + - jwt 
(https://github.com/GehirnInc/python-jwt) OR + - PyJWT (https://pypi.org/project/PyJWT/) AND cryptography (https://pypi.org/project/cryptography/) +description: + - This generates a Github access token that can be used with a C(git) command, if you use a Github App. +options: + key_path: + description: + - Path to your private key. + - Either O(key_path) or O(private_key) must be specified. + type: path + app_id: + description: + - Your GitHub App ID, you can find this in the Settings page. + required: true + type: str + installation_id: + description: + - The installation ID that contains the git repository you would like access to. + - As of 2023-12-24, this can be found at Settings page > Integrations > Application. The last part of the URL in the + configure button is the installation ID. + - Alternatively, you can use PyGithub (U(https://github.com/PyGithub/PyGithub)) to get your installation ID. + required: true + type: str + private_key: + description: + - GitHub App private key in PEM file format as string. + - Either O(key_path) or O(private_key) must be specified. + type: str + version_added: 10.0.0 + token_expiry: + description: + - How long the token should last for in seconds. + default: 600 + type: int + github_url: + description: + - Base URL for the GitHub API (for GitHub Enterprise Server). + - "Example: C(https://github-enterprise-server.example.com/api/v3)" + default: https://api.github.com + type: str + version_added: 11.4.0 +""" + +EXAMPLES = r""" +- name: Get access token to be used for git checkout with app_id=123456, installation_id=64209 + ansible.builtin.git: + repo: >- + https://x-access-token:{{ github_token }}@github.com/hidden_user/super-secret-repo.git + dest: /srv/checkout + vars: + github_token: >- + {{ lookup('community.general.github_app_access_token', key_path='/home/to_your/key', + app_id='123456', installation_id='64209') }} +""" + +RETURN = r""" +_raw: + description: A one-element list containing your GitHub access token. 
+ type: list + elements: str +""" + +try: + import jwt + HAS_JWT = True +except ImportError: + HAS_JWT = False + +HAS_PYTHON_JWT = False # vs pyjwt +if HAS_JWT and hasattr(jwt, 'JWT'): + HAS_PYTHON_JWT = True + from jwt import jwk_from_pem, JWT + jwt_instance = JWT() + +try: + from cryptography.hazmat.primitives import serialization + HAS_CRYPTOGRAPHY = True +except ImportError: + HAS_CRYPTOGRAPHY = False + + +import time +import json +from urllib.error import HTTPError + +from ansible.module_utils.urls import open_url +from ansible.errors import AnsibleError, AnsibleOptionsError +from ansible.plugins.lookup import LookupBase +from ansible.utils.display import Display + +display = Display() + + +class PythonJWT: + + @staticmethod + def read_key(path, private_key=None): + try: + if private_key: + return jwk_from_pem(private_key.encode('utf-8')) + with open(path, 'rb') as pem_file: + return jwk_from_pem(pem_file.read()) + except Exception as e: + raise AnsibleError(f"Error while parsing key file: {e}") + + @staticmethod + def encode_jwt(app_id, jwk, exp=600): + now = int(time.time()) + payload = { + 'iat': now, + 'exp': now + exp, + 'iss': app_id, + } + try: + return jwt_instance.encode(payload, jwk, alg='RS256') + except Exception as e: + raise AnsibleError(f"Error while encoding jwt: {e}") + + +def read_key(path, private_key=None): + if HAS_PYTHON_JWT: + return PythonJWT.read_key(path, private_key) + try: + if private_key: + key_bytes = private_key.encode('utf-8') + else: + with open(path, 'rb') as pem_file: + key_bytes = pem_file.read() + return serialization.load_pem_private_key(key_bytes, password=None) + except Exception as e: + raise AnsibleError(f"Error while parsing key file: {e}") + + +def encode_jwt(app_id, private_key_obj, exp=600): + if HAS_PYTHON_JWT: + return PythonJWT.encode_jwt(app_id, private_key_obj) + now = int(time.time()) + payload = { + 'iat': now, + 'exp': now + exp, + 'iss': app_id, + } + try: + return jwt.encode(payload, private_key_obj, 
algorithm='RS256')
+    except Exception as e:
+        raise AnsibleError(f"Error while encoding jwt: {e}")
+
+
+def post_request(generated_jwt, installation_id, api_base):
+    base = api_base.rstrip('/')
+    github_url = f"{base}/app/installations/{installation_id}/access_tokens"
+
+    headers = {
+        "Authorization": f'Bearer {generated_jwt}',
+        "Accept": "application/vnd.github.v3+json",
+    }
+    try:
+        response = open_url(github_url, headers=headers, method='POST')
+    except HTTPError as e:
+        try:
+            error_body = json.loads(e.read().decode())
+            display.vvv(f"Error returned: {error_body}")
+        except Exception:
+            error_body = {}
+        if e.code == 404:
+            raise AnsibleError("Github return error. Please confirm your installation_id value is valid")
+        elif e.code == 401:
+            raise AnsibleError("Github return error. Please confirm your private key is valid")
+        raise AnsibleError(f"Unexpected data returned: {e} -- {error_body}")
+    response_body = response.read()
+    try:
+        json_data = json.loads(response_body.decode('utf-8'))
+    except json.decoder.JSONDecodeError as e:
+        raise AnsibleError(f"Error while decoding JSON response from github: {e}")
+    return json_data.get('token')
+
+
+def get_token(key_path, app_id, installation_id, private_key, github_url, expiry=600):
+    jwk = read_key(key_path, private_key)
+    generated_jwt = encode_jwt(app_id, jwk, exp=expiry)
+    return post_request(generated_jwt, installation_id, github_url)
+
+
+class LookupModule(LookupBase):
+    def run(self, terms, variables=None, **kwargs):
+        if not HAS_JWT:
+            raise AnsibleError('Python jwt library is required. '
+                               'Please install using "pip install pyjwt"')
+
+        if not HAS_PYTHON_JWT and not HAS_CRYPTOGRAPHY:
+            raise AnsibleError('Python cryptography library is required. 
' + 'Please install using "pip install cryptography"') + + self.set_options(var_options=variables, direct=kwargs) + + if not (self.get_option("key_path") or self.get_option("private_key")): + raise AnsibleOptionsError("One of key_path or private_key is required") + if self.get_option("key_path") and self.get_option("private_key"): + raise AnsibleOptionsError("key_path and private_key are mutually exclusive") + + t = get_token( + self.get_option('key_path'), + self.get_option('app_id'), + self.get_option('installation_id'), + self.get_option('private_key'), + self.get_option('github_url'), + self.get_option('token_expiry'), + ) + + return [t] diff --git a/plugins/lookup/hiera.py b/plugins/lookup/hiera.py index 1049e80b02..d031987a81 100644 --- a/plugins/lookup/hiera.py +++ b/plugins/lookup/hiera.py @@ -1,43 +1,49 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017, Juan Manuel Parrilla # Copyright (c) 2012-17 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - author: - - Juan Manuel Parrilla (@jparrill) - name: hiera - short_description: get info from hiera data - requirements: - - hiera (command line utility) +DOCUMENTATION = r""" +author: + - Juan Manuel Parrilla (@jparrill) +name: hiera +short_description: Get info from hiera data +requirements: + - hiera (command line utility) +description: + - Retrieves data from an Puppetmaster node using Hiera as ENC. +deprecated: + removed_in: 13.0.0 + why: >- + Hiera has been deprecated a long time ago. + If you disagree with this deprecation, please create an issue in the community.general repository. + alternative: Unknown. +options: + _terms: description: - - Retrieves data from an Puppetmaster node using Hiera as ENC. 
- options: - _terms: - description: - - The list of keys to lookup on the Puppetmaster. - type: list - elements: string - required: true - executable: - description: - - Binary file to execute Hiera. - default: '/usr/bin/hiera' - env: - - name: ANSIBLE_HIERA_BIN - config_file: - description: - - File that describes the hierarchy of Hiera. - default: '/etc/hiera.yaml' - env: - - name: ANSIBLE_HIERA_CFG + - The list of keys to lookup on the Puppetmaster. + type: list + elements: string + required: true + executable: + description: + - Binary file to execute Hiera. + type: string + default: '/usr/bin/hiera' + env: + - name: ANSIBLE_HIERA_BIN + config_file: + description: + - File that describes the hierarchy of Hiera. + type: string + default: '/etc/hiera.yaml' + env: + - name: ANSIBLE_HIERA_CFG # FIXME: incomplete options .. _terms? environment/fqdn? -''' +""" -EXAMPLES = """ +EXAMPLES = r""" # All this examples depends on hiera.yml that describes the hierarchy - name: "a value from Hiera 'DB'" @@ -53,16 +59,14 @@ EXAMPLES = """ msg: "{{ lookup('community.general.hiera', 'foo fqdn=puppet01.localdomain') }}" """ -RETURN = """ - _raw: - description: - - a value associated with input key - type: list - elements: str +RETURN = r""" +_raw: + description: + - A value associated with input key. 
+ type: list + elements: str """ -import os - from ansible.plugins.lookup import LookupBase from ansible.utils.cmd_functions import run_cmd from ansible.module_utils.common.text.converters import to_text @@ -79,8 +83,7 @@ class Hiera(object): pargs.extend(hiera_key) - rc, output, err = run_cmd("{0} -c {1} {2}".format( - self.hiera_bin, self.hiera_cfg, hiera_key[0])) + rc, output, err = run_cmd(f"{self.hiera_bin} -c {self.hiera_cfg} {hiera_key[0]}") return to_text(output.strip()) diff --git a/plugins/lookup/keyring.py b/plugins/lookup/keyring.py index a4c914ed1a..73fca84e6f 100644 --- a/plugins/lookup/keyring.py +++ b/plugins/lookup/keyring.py @@ -1,24 +1,22 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2016, Samuel Boucher # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: keyring - author: - - Samuel Boucher (!UNKNOWN) - requirements: - - keyring (python library) - short_description: grab secrets from the OS keyring - description: - - Allows you to access data stored in the OS provided keyring/keychain. -''' +DOCUMENTATION = r""" +name: keyring +author: + - Samuel Boucher (!UNKNOWN) +requirements: + - keyring (python library) +short_description: Grab secrets from the OS keyring +description: + - Allows you to access data stored in the OS provided keyring/keychain. +""" -EXAMPLES = """ +EXAMPLES = r""" - name: output secrets to screen (BAD IDEA) ansible.builtin.debug: msg: "Password: {{item}}" @@ -31,11 +29,11 @@ EXAMPLES = """ login_user: joe """ -RETURN = """ - _raw: - description: Secrets stored. - type: list - elements: str +RETURN = r""" +_raw: + description: Secrets stored. 
+ type: list + elements: str """ HAS_KEYRING = True @@ -57,17 +55,17 @@ class LookupModule(LookupBase): def run(self, terms, variables=None, **kwargs): if not HAS_KEYRING: - raise AnsibleError(u"Can't LOOKUP(keyring): missing required python library 'keyring'") + raise AnsibleError("Can't LOOKUP(keyring): missing required python library 'keyring'") self.set_options(var_options=variables, direct=kwargs) - display.vvvv(u"keyring: %s" % keyring.get_keyring()) + display.vvvv(f"keyring: {keyring.get_keyring()}") ret = [] for term in terms: (servicename, username) = (term.split()[0], term.split()[1]) - display.vvvv(u"username: %s, servicename: %s " % (username, servicename)) + display.vvvv(f"username: {username}, servicename: {servicename} ") password = keyring.get_password(servicename, username) if password is None: - raise AnsibleError(u"servicename: %s for user %s not found" % (servicename, username)) + raise AnsibleError(f"servicename: {servicename} for user {username} not found") ret.append(password.rstrip()) return ret diff --git a/plugins/lookup/lastpass.py b/plugins/lookup/lastpass.py index 8eb3090b76..8a3999c372 100644 --- a/plugins/lookup/lastpass.py +++ b/plugins/lookup/lastpass.py @@ -1,44 +1,42 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2016, Andrew Zenk # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: lastpass - author: - - Andrew Zenk (!UNKNOWN) - requirements: - - lpass (command line utility) - - must have already logged into LastPass - short_description: fetch data from LastPass - description: - - Use the lpass command line utility to fetch specific fields from LastPass. - options: - _terms: - description: Key from which you want to retrieve the field. 
- required: true - type: list - elements: str - field: - description: Field to return from LastPass. - default: 'password' - type: str -''' +DOCUMENTATION = r""" +name: lastpass +author: + - Andrew Zenk (!UNKNOWN) +requirements: + - lpass (command line utility) + - must have already logged into LastPass +short_description: Fetch data from LastPass +description: + - Use the lpass command line utility to fetch specific fields from LastPass. +options: + _terms: + description: Key from which you want to retrieve the field. + required: true + type: list + elements: str + field: + description: Field to return from LastPass. + default: 'password' + type: str +""" -EXAMPLES = """ +EXAMPLES = r""" - name: get 'custom_field' from LastPass entry 'entry-name' ansible.builtin.debug: msg: "{{ lookup('community.general.lastpass', 'entry-name', field='custom_field') }}" """ -RETURN = """ - _raw: - description: secrets stored - type: list - elements: str +RETURN = r""" +_raw: + description: Secrets stored. 
+ type: list + elements: str """ from subprocess import Popen, PIPE @@ -83,9 +81,9 @@ class LPass(object): def get_field(self, key, field): if field in ['username', 'password', 'url', 'notes', 'id', 'name']: - out, err = self._run(self._build_args("show", ["--{0}".format(field), key])) + out, err = self._run(self._build_args("show", [f"--{field}", key])) else: - out, err = self._run(self._build_args("show", ["--field={0}".format(field), key])) + out, err = self._run(self._build_args("show", [f"--field={field}", key])) return out.strip() diff --git a/plugins/lookup/lmdb_kv.py b/plugins/lookup/lmdb_kv.py index 0950249dc8..f9b0d9482f 100644 --- a/plugins/lookup/lmdb_kv.py +++ b/plugins/lookup/lmdb_kv.py @@ -1,35 +1,33 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017-2018, Jan-Piet Mens # Copyright (c) 2018 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: lmdb_kv - author: - - Jan-Piet Mens (@jpmens) - version_added: '0.2.0' - short_description: fetch data from LMDB - description: - - This lookup returns a list of results from an LMDB DB corresponding to a list of items given to it. - requirements: - - lmdb (python library https://lmdb.readthedocs.io/en/release/) - options: - _terms: - description: List of keys to query. - type: list - elements: str - db: - description: Path to LMDB database. - type: str - default: 'ansible.mdb' - vars: - - name: lmdb_kv_db -''' +DOCUMENTATION = r""" +name: lmdb_kv +author: + - Jan-Piet Mens (@jpmens) +version_added: '0.2.0' +short_description: Fetch data from LMDB +description: + - This lookup returns a list of results from an LMDB DB corresponding to a list of items given to it. 
+requirements: + - lmdb (Python library U(https://lmdb.readthedocs.io/en/release/)) +options: + _terms: + description: List of keys to query. + type: list + elements: str + db: + description: Path to LMDB database. + type: str + default: 'ansible.mdb' + vars: + - name: lmdb_kv_db +""" -EXAMPLES = """ +EXAMPLES = r""" - name: query LMDB for a list of country codes ansible.builtin.debug: msg: "{{ query('community.general.lmdb_kv', 'nl', 'be', 'lu', db='jp.mdb') }}" @@ -40,7 +38,7 @@ EXAMPLES = """ vars: - lmdb_kv_db: jp.mdb with_community.general.lmdb_kv: - - "n*" + - "n*" - name: get an item by key ansible.builtin.assert: @@ -52,9 +50,9 @@ EXAMPLES = """ - be """ -RETURN = """ +RETURN = r""" _raw: - description: value(s) stored in LMDB + description: Value(s) stored in LMDB. type: list elements: raw """ @@ -96,7 +94,7 @@ class LookupModule(LookupBase): try: env = lmdb.open(str(db), readonly=True) except Exception as e: - raise AnsibleError("LMDB can't open database %s: %s" % (db, to_native(e))) + raise AnsibleError(f"LMDB cannot open database {db}: {e}") ret = [] if len(terms) == 0: diff --git a/plugins/lookup/manifold.py b/plugins/lookup/manifold.py deleted file mode 100644 index 51064b9c2b..0000000000 --- a/plugins/lookup/manifold.py +++ /dev/null @@ -1,281 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2018, Arigato Machine Inc. -# Copyright (c) 2018, Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' - author: - - Kyrylo Galanov (!UNKNOWN) - name: manifold - short_description: get credentials from Manifold.co - description: - - Retrieves resources' credentials from Manifold.co - options: - _terms: - description: - - Optional list of resource labels to lookup on Manifold.co. 
If no resources are specified, all - matched resources will be returned. - type: list - elements: string - required: false - api_token: - description: - - manifold API token - type: string - required: true - env: - - name: MANIFOLD_API_TOKEN - project: - description: - - The project label you want to get the resource for. - type: string - required: false - team: - description: - - The team label you want to get the resource for. - type: string - required: false -''' - -EXAMPLES = ''' - - name: all available resources - ansible.builtin.debug: - msg: "{{ lookup('community.general.manifold', api_token='SecretToken') }}" - - name: all available resources for a specific project in specific team - ansible.builtin.debug: - msg: "{{ lookup('community.general.manifold', api_token='SecretToken', project='poject-1', team='team-2') }}" - - name: two specific resources - ansible.builtin.debug: - msg: "{{ lookup('community.general.manifold', 'resource-1', 'resource-2') }}" -''' - -RETURN = ''' - _raw: - description: - - dictionary of credentials ready to be consumed as environment variables. If multiple resources define - the same environment variable(s), the last one returned by the Manifold API will take precedence. 
- type: dict -''' -from ansible.errors import AnsibleError -from ansible.plugins.lookup import LookupBase -from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError -from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils import six -from ansible.utils.display import Display -from traceback import format_exception -import json -import sys -import os - -display = Display() - - -class ApiError(Exception): - pass - - -class ManifoldApiClient(object): - base_url = 'https://api.{api}.manifold.co/v1/{endpoint}' - http_agent = 'python-manifold-ansible-1.0.0' - - def __init__(self, token): - self._token = token - - def request(self, api, endpoint, *args, **kwargs): - """ - Send a request to API backend and pre-process a response. - :param api: API to send a request to - :type api: str - :param endpoint: API endpoint to fetch data from - :type endpoint: str - :param args: other args for open_url - :param kwargs: other kwargs for open_url - :return: server response. JSON response is automatically deserialized. 
- :rtype: dict | list | str - """ - - default_headers = { - 'Authorization': "Bearer {0}".format(self._token), - 'Accept': "*/*" # Otherwise server doesn't set content-type header - } - - url = self.base_url.format(api=api, endpoint=endpoint) - - headers = default_headers - arg_headers = kwargs.pop('headers', None) - if arg_headers: - headers.update(arg_headers) - - try: - display.vvvv('manifold lookup connecting to {0}'.format(url)) - response = open_url(url, headers=headers, http_agent=self.http_agent, *args, **kwargs) - data = response.read() - if response.headers.get('content-type') == 'application/json': - data = json.loads(data) - return data - except ValueError: - raise ApiError('JSON response can\'t be parsed while requesting {url}:\n{json}'.format(json=data, url=url)) - except HTTPError as e: - raise ApiError('Server returned: {err} while requesting {url}:\n{response}'.format( - err=str(e), url=url, response=e.read())) - except URLError as e: - raise ApiError('Failed lookup url for {url} : {err}'.format(url=url, err=str(e))) - except SSLValidationError as e: - raise ApiError('Error validating the server\'s certificate for {url}: {err}'.format(url=url, err=str(e))) - except ConnectionError as e: - raise ApiError('Error connecting to {url}: {err}'.format(url=url, err=str(e))) - - def get_resources(self, team_id=None, project_id=None, label=None): - """ - Get resources list - :param team_id: ID of the Team to filter resources by - :type team_id: str - :param project_id: ID of the project to filter resources by - :type project_id: str - :param label: filter resources by a label, returns a list with one or zero elements - :type label: str - :return: list of resources - :rtype: list - """ - api = 'marketplace' - endpoint = 'resources' - query_params = {} - - if team_id: - query_params['team_id'] = team_id - if project_id: - query_params['project_id'] = project_id - if label: - query_params['label'] = label - - if query_params: - endpoint += '?' 
+ urlencode(query_params) - - return self.request(api, endpoint) - - def get_teams(self, label=None): - """ - Get teams list - :param label: filter teams by a label, returns a list with one or zero elements - :type label: str - :return: list of teams - :rtype: list - """ - api = 'identity' - endpoint = 'teams' - data = self.request(api, endpoint) - # Label filtering is not supported by API, however this function provides uniform interface - if label: - data = list(filter(lambda x: x['body']['label'] == label, data)) - return data - - def get_projects(self, label=None): - """ - Get projects list - :param label: filter projects by a label, returns a list with one or zero elements - :type label: str - :return: list of projects - :rtype: list - """ - api = 'marketplace' - endpoint = 'projects' - query_params = {} - - if label: - query_params['label'] = label - - if query_params: - endpoint += '?' + urlencode(query_params) - - return self.request(api, endpoint) - - def get_credentials(self, resource_id): - """ - Get resource credentials - :param resource_id: ID of the resource to filter credentials by - :type resource_id: str - :return: - """ - api = 'marketplace' - endpoint = 'credentials?' + urlencode({'resource_id': resource_id}) - return self.request(api, endpoint) - - -class LookupModule(LookupBase): - - def run(self, terms, variables=None, **kwargs): - """ - :param terms: a list of resources lookups to run. 
- :param variables: ansible variables active at the time of the lookup - :param api_token: API token - :param project: optional project label - :param team: optional team label - :return: a dictionary of resources credentials - """ - - self.set_options(var_options=variables, direct=kwargs) - - api_token = self.get_option('api_token') - project = self.get_option('project') - team = self.get_option('team') - - try: - labels = terms - client = ManifoldApiClient(api_token) - - if team: - team_data = client.get_teams(team) - if len(team_data) == 0: - raise AnsibleError("Team '{0}' does not exist".format(team)) - team_id = team_data[0]['id'] - else: - team_id = None - - if project: - project_data = client.get_projects(project) - if len(project_data) == 0: - raise AnsibleError("Project '{0}' does not exist".format(project)) - project_id = project_data[0]['id'] - else: - project_id = None - - if len(labels) == 1: # Use server-side filtering if one resource is requested - resources_data = client.get_resources(team_id=team_id, project_id=project_id, label=labels[0]) - else: # Get all resources and optionally filter labels - resources_data = client.get_resources(team_id=team_id, project_id=project_id) - if labels: - resources_data = list(filter(lambda x: x['body']['label'] in labels, resources_data)) - - if labels and len(resources_data) < len(labels): - fetched_labels = [r['body']['label'] for r in resources_data] - not_found_labels = [label for label in labels if label not in fetched_labels] - raise AnsibleError("Resource(s) {0} do not exist".format(', '.join(not_found_labels))) - - credentials = {} - cred_map = {} - for resource in resources_data: - resource_credentials = client.get_credentials(resource['id']) - if len(resource_credentials) and resource_credentials[0]['body']['values']: - for cred_key, cred_val in six.iteritems(resource_credentials[0]['body']['values']): - label = resource['body']['label'] - if cred_key in credentials: - display.warning("'{cred_key}' with 
label '{old_label}' was replaced by resource data " - "with label '{new_label}'".format(cred_key=cred_key, - old_label=cred_map[cred_key], - new_label=label)) - credentials[cred_key] = cred_val - cred_map[cred_key] = label - - ret = [credentials] - return ret - except ApiError as e: - raise AnsibleError('API Error: {0}'.format(str(e))) - except AnsibleError as e: - raise e - except Exception: - exc_type, exc_value, exc_traceback = sys.exc_info() - raise AnsibleError(format_exception(exc_type, exc_value, exc_traceback)) diff --git a/plugins/lookup/merge_variables.py b/plugins/lookup/merge_variables.py new file mode 100644 index 0000000000..5c1686b499 --- /dev/null +++ b/plugins/lookup/merge_variables.py @@ -0,0 +1,232 @@ +# Copyright (c) 2020, Thales Netherlands +# Copyright (c) 2021, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +author: + - Roy Lenferink (@rlenferink) + - Mark Ettema (@m-a-r-k-e) + - Alexander Petrenz (@alpex8) +name: merge_variables +short_description: Merge variables whose names match a given pattern +description: + - This lookup returns the merged result of all variables in scope that match the given prefixes, suffixes, or regular expressions, + optionally. +version_added: 6.5.0 +options: + _terms: + description: + - Depending on the value of O(pattern_type), this is a list of prefixes, suffixes, or regular expressions that is used + to match all variables that should be merged. + required: true + type: list + elements: str + pattern_type: + description: + - Change the way of searching for the specified pattern. 
+ type: str + default: 'regex' + choices: + - prefix + - suffix + - regex + env: + - name: ANSIBLE_MERGE_VARIABLES_PATTERN_TYPE + ini: + - section: merge_variables_lookup + key: pattern_type + initial_value: + description: + - An initial value to start with. + type: raw + override: + description: + - Return an error, print a warning or ignore it when a key is overwritten. + - The default behavior V(error) makes the plugin fail when a key would be overwritten. + - When V(warn) and V(ignore) are used, note that it is important to know that the variables are sorted by name before + being merged. Keys for later variables in this order overwrite keys of the same name for variables earlier in this + order. To avoid potential confusion, better use O(override=error) whenever possible. + type: str + default: 'error' + choices: + - error + - warn + - ignore + env: + - name: ANSIBLE_MERGE_VARIABLES_OVERRIDE + ini: + - section: merge_variables_lookup + key: override + groups: + description: + - Search for variables across hosts that belong to the given groups. This allows to collect configuration pieces across + different hosts (for example a service on a host with its database on another host). 
+ type: list + elements: str + version_added: 8.5.0 +""" + +EXAMPLES = r""" +# Some example variables, they can be defined anywhere as long as they are in scope +test_init_list: + - "list init item 1" + - "list init item 2" + +testa__test_list: + - "test a item 1" + +testb__test_list: + - "test b item 1" + +testa__test_dict: + ports: + - 1 + +testb__test_dict: + ports: + - 3 + +# Merge variables that end with '__test_dict' and store the result in a variable 'example_a' +example_a: "{{ lookup('community.general.merge_variables', '__test_dict', pattern_type='suffix') }}" + +# The variable example_a now contains: +# ports: +# - 1 +# - 3 + +# Merge variables that match the '^.+__test_list$' regular expression, starting with an initial value and store the +# result in a variable 'example_b' +example_b: "{{ lookup('community.general.merge_variables', '^.+__test_list$', initial_value=test_init_list) }}" + +# The variable example_b now contains: +# - "list init item 1" +# - "list init item 2" +# - "test a item 1" +# - "test b item 1" +""" + +RETURN = r""" +_raw: + description: In case the search matches list items, a list is returned. In case the search matches dicts, a dict is returned. 
+ type: raw + elements: raw +""" + +import re + +from ansible.errors import AnsibleError +from ansible.plugins.lookup import LookupBase +from ansible.utils.display import Display + +display = Display() + + +def _verify_and_get_type(variable): + if isinstance(variable, list): + return "list" + elif isinstance(variable, dict): + return "dict" + else: + raise AnsibleError("Not supported type detected, variable must be a list or a dict") + + +class LookupModule(LookupBase): + def run(self, terms, variables=None, **kwargs): + self.set_options(direct=kwargs) + initial_value = self.get_option("initial_value", None) + self._override = self.get_option('override', 'error') + self._pattern_type = self.get_option('pattern_type', 'regex') + self._groups = self.get_option('groups', None) + + ret = [] + for term in terms: + if not isinstance(term, str): + raise AnsibleError(f"Non-string type '{type(term)}' passed, only 'str' types are allowed!") + + if not self._groups: # consider only own variables + ret.append(self._merge_vars(term, initial_value, variables)) + else: # consider variables of hosts in given groups + cross_host_merge_result = initial_value + for host in variables["hostvars"]: + if self._is_host_in_allowed_groups(variables["hostvars"][host]["group_names"]): + host_variables = dict(variables["hostvars"].raw_get(host)) + host_variables["hostvars"] = variables["hostvars"] # re-add hostvars + cross_host_merge_result = self._merge_vars(term, cross_host_merge_result, host_variables) + ret.append(cross_host_merge_result) + + return ret + + def _is_host_in_allowed_groups(self, host_groups): + if 'all' in self._groups: + return True + + group_intersection = [host_group_name for host_group_name in host_groups if host_group_name in self._groups] + if group_intersection: + return True + + return False + + def _var_matches(self, key, search_pattern): + if self._pattern_type == "prefix": + return key.startswith(search_pattern) + elif self._pattern_type == "suffix": + return 
key.endswith(search_pattern) + elif self._pattern_type == "regex": + matcher = re.compile(search_pattern) + return matcher.search(key) + + return False + + def _merge_vars(self, search_pattern, initial_value, variables): + display.vvv(f"Merge variables with {self._pattern_type}: {search_pattern}") + var_merge_names = sorted([key for key in variables.keys() if self._var_matches(key, search_pattern)]) + display.vvv(f"The following variables will be merged: {var_merge_names}") + prev_var_type = None + result = None + + if initial_value is not None: + prev_var_type = _verify_and_get_type(initial_value) + result = initial_value + + for var_name in var_merge_names: + temp_templar = self._templar.copy_with_new_env(available_variables=variables) # tmp. switch renderer to context of current variables + var_value = temp_templar.template(variables[var_name]) # Render jinja2 templates + var_type = _verify_and_get_type(var_value) + + if prev_var_type is None: + prev_var_type = var_type + elif prev_var_type != var_type: + raise AnsibleError("Unable to merge, not all variables are of the same type") + + if result is None: + result = var_value + continue + + if var_type == "dict": + result = self._merge_dict(var_value, result, [var_name]) + else: # var_type == "list" + result += var_value + + return result + + def _merge_dict(self, src, dest, path): + for key, value in src.items(): + if isinstance(value, dict): + node = dest.setdefault(key, {}) + self._merge_dict(value, node, path + [key]) + elif isinstance(value, list) and key in dest: + dest[key] += value + else: + if (key in dest) and dest[key] != value: + msg = f"The key '{key}' with value '{dest[key]}' will be overwritten with value '{value}' from '{'.'.join(path)}.{key}'" + + if self._override == "error": + raise AnsibleError(msg) + if self._override == "warn": + display.warning(msg) + + dest[key] = value + + return dest diff --git a/plugins/lookup/onepassword.py b/plugins/lookup/onepassword.py index 5e9549c2b7..ab68796ed1 
100644 --- a/plugins/lookup/onepassword.py +++ b/plugins/lookup/onepassword.py @@ -1,64 +1,44 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2018, Scott Buchanan +# Copyright (c) 2018, Scott Buchanan # Copyright (c) 2016, Andrew Zenk (lastpass.py used as starting point) # Copyright (c) 2018, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: onepassword - author: - - Scott Buchanan (@scottsb) - - Andrew Zenk (@azenk) - - Sam Doran (@samdoran) - requirements: - - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/) - short_description: fetch field values from 1Password - description: - - C(onepassword) wraps the C(op) command line utility to fetch specific field values from 1Password. - options: - _terms: - description: identifier(s) (UUID, name, or subdomain; case-insensitive) of item(s) to retrieve. - required: true - field: - description: field to return from each matching item (case-insensitive). - default: 'password' - master_password: - description: The password used to unlock the specified vault. - aliases: ['vault_password'] - section: - description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from any section. - domain: - description: Domain of 1Password. - version_added: 3.2.0 - default: '1password.com' - type: str - subdomain: - description: The 1Password subdomain to authenticate against. - username: - description: The username used to sign in. - secret_key: - description: The secret key used when performing an initial sign in. - vault: - description: Vault containing the item to retrieve (case-insensitive). If absent will search all vaults. 
- notes: - - This lookup will use an existing 1Password session if one exists. If not, and you have already - performed an initial sign in (meaning C(~/.op/config), C(~/.config/op/config) or C(~/.config/.op/config) exists), then only the - C(master_password) is required. You may optionally specify C(subdomain) in this scenario, otherwise the last used subdomain will be used by C(op). - - This lookup can perform an initial login by providing C(subdomain), C(username), C(secret_key), and C(master_password). - - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal credentials - needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or greater in strength - to the 1Password master password. - - This lookup stores potentially sensitive data from 1Password as Ansible facts. - Facts are subject to caching if enabled, which means this data could be stored in clear text - on disk or in a database. - - Tested with C(op) version 2.7.2 -''' +DOCUMENTATION = r""" +name: onepassword +author: + - Scott Buchanan (@scottsb) + - Andrew Zenk (@azenk) + - Sam Doran (@samdoran) +short_description: Fetch field values from 1Password +description: + - P(community.general.onepassword#lookup) wraps the C(op) command line utility to fetch specific field values from 1Password. +requirements: + - C(op) 1Password command line utility +options: + _terms: + description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve. + required: true + type: list + elements: string + account_id: + version_added: 7.5.0 + domain: + version_added: 3.2.0 + field: + description: Field to return from each matching item (case-insensitive). 
+ default: 'password' + type: str + service_account_token: + version_added: 7.1.0 +extends_documentation_fragment: + - community.general.onepassword + - community.general.onepassword.lookup +""" -EXAMPLES = """ +EXAMPLES = r""" # These examples only work when already signed in to 1Password - name: Retrieve password for KITT when already signed in to 1Password ansible.builtin.debug: @@ -74,26 +54,24 @@ EXAMPLES = """ - name: Retrieve password for HAL when not signed in to 1Password ansible.builtin.debug: - var: lookup('community.general.onepassword' - 'HAL 9000' - subdomain='Discovery' - master_password=vault_master_password) + var: lookup('community.general.onepassword', 'HAL 9000', subdomain='Discovery', master_password=vault_master_password) - name: Retrieve password for HAL when never signed in to 1Password ansible.builtin.debug: - var: lookup('community.general.onepassword' - 'HAL 9000' - subdomain='Discovery' - master_password=vault_master_password - username='tweety@acme.com' - secret_key=vault_secret_key) + var: >- + lookup('community.general.onepassword', 'HAL 9000', subdomain='Discovery', master_password=vault_master_password, + username='tweety@acme.com', secret_key=vault_secret_key) + +- name: Retrieve password from specific account + ansible.builtin.debug: + var: lookup('community.general.onepassword', 'HAL 9000', account_id='abc123') """ -RETURN = """ - _raw: - description: field data requested - type: list - elements: str +RETURN = r""" +_raw: + description: Field data requested. 
+ type: list + elements: str """ import abc @@ -102,38 +80,60 @@ import json import subprocess from ansible.plugins.lookup import LookupBase -from ansible.errors import AnsibleLookupError +from ansible.errors import AnsibleLookupError, AnsibleOptionsError from ansible.module_utils.common.process import get_bin_path from ansible.module_utils.common.text.converters import to_bytes, to_text -from ansible.module_utils.six import with_metaclass from ansible_collections.community.general.plugins.module_utils.onepassword import OnePasswordConfig -class OnePassCLIBase(with_metaclass(abc.ABCMeta, object)): +def _lower_if_possible(value): + """Return the lower case version value, otherwise return the value""" + try: + return value.lower() + except AttributeError: + return value + + +class OnePassCLIBase(object, metaclass=abc.ABCMeta): bin = "op" - def __init__(self, subdomain=None, domain="1password.com", username=None, secret_key=None, master_password=None): + def __init__( + self, + subdomain=None, + domain="1password.com", + username=None, + secret_key=None, + master_password=None, + service_account_token=None, + account_id=None, + connect_host=None, + connect_token=None, + ): self.subdomain = subdomain self.domain = domain self.username = username self.master_password = master_password self.secret_key = secret_key + self.service_account_token = service_account_token + self.account_id = account_id + self.connect_host = connect_host + self.connect_token = connect_token self._path = None self._version = None def _check_required_params(self, required_params): - non_empty_attrs = dict((param, getattr(self, param, None)) for param in required_params if getattr(self, param, None)) + non_empty_attrs = {param: getattr(self, param) for param in required_params if getattr(self, param, None)} missing = set(required_params).difference(non_empty_attrs) if missing: prefix = "Unable to sign in to 1Password. 
Missing required parameter" plural = "" - suffix = ": {params}.".format(params=", ".join(missing)) + suffix = f": {', '.join(missing)}." if len(missing) > 1: plural = "s" - msg = "{prefix}{plural}{suffix}".format(prefix=prefix, plural=plural, suffix=suffix) + msg = f"{prefix}{plural}{suffix}" raise AnsibleLookupError(msg) @abc.abstractmethod @@ -158,7 +158,7 @@ class OnePassCLIBase(with_metaclass(abc.ABCMeta, object)): rc = p.wait() if not ignore_errors and rc != expected_rc: - raise AnsibleLookupError(to_text(err)) + raise AnsibleLookupError(str(err)) return rc, out, err @@ -199,12 +199,12 @@ class OnePassCLIBase(with_metaclass(abc.ABCMeta, object)): try: bin_path = get_bin_path(cls.bin) except ValueError: - raise AnsibleLookupError("Unable to locate '%s' command line tool" % cls.bin) + raise AnsibleLookupError(f"Unable to locate '{cls.bin}' command line tool") try: b_out = subprocess.check_output([bin_path, "--version"], stderr=subprocess.PIPE) except subprocess.CalledProcessError as cpe: - raise AnsibleLookupError("Unable to get the op version: %s" % cpe) + raise AnsibleLookupError(f"Unable to get the op version: {cpe}") return to_text(b_out).strip() @@ -286,8 +286,10 @@ class OnePassCLIv1(OnePassCLIBase): def assert_logged_in(self): args = ["get", "account"] - if self.subdomain: - account = "{subdomain}.{domain}".format(subdomain=self.subdomain, domain=self.domain) + if self.account_id: + args.extend(["--account", self.account_id]) + elif self.subdomain: + account = f"{self.subdomain}.{self.domain}" args.extend(["--account", account]) rc, out, err = self._run(args, ignore_errors=True) @@ -295,6 +297,14 @@ class OnePassCLIv1(OnePassCLIBase): return not bool(rc) def full_signin(self): + if self.connect_host or self.connect_token: + raise AnsibleLookupError( + "1Password Connect is not available with 1Password CLI version 1. 
Please use version 2 or later.") + + if self.service_account_token: + raise AnsibleLookupError( + "1Password CLI version 1 does not support Service Accounts. Please use version 2 or later.") + required_params = [ "subdomain", "username", @@ -305,7 +315,7 @@ class OnePassCLIv1(OnePassCLIBase): args = [ "signin", - "{0}.{1}".format(self.subdomain, self.domain), + f"{self.subdomain}.{self.domain}", to_bytes(self.username), to_bytes(self.secret_key), "--raw", @@ -315,8 +325,12 @@ class OnePassCLIv1(OnePassCLIBase): def get_raw(self, item_id, vault=None, token=None): args = ["get", "item", item_id] + + if self.account_id: + args.extend(["--account", self.account_id]) + if vault is not None: - args += ["--vault={0}".format(vault)] + args += [f"--vault={vault}"] if token is not None: args += [to_bytes("--session=") + token] @@ -442,6 +456,7 @@ class OnePassCLIv2(OnePassCLIBase): } """ data = json.loads(data_json) + field_name = _lower_if_possible(field_name) for field in data.get("fields", []): if section_title is None: # If the field name exists in the section, return that value @@ -450,31 +465,43 @@ class OnePassCLIv2(OnePassCLIBase): # If the field name doesn't exist in the section, match on the value of "label" # then "id" and return "value" - if field.get("label") == field_name: - return field["value"] + if field.get("label", "").lower() == field_name: + return field.get("value", "") - if field.get("id") == field_name: - return field["value"] + if field.get("id", "").lower() == field_name: + return field.get("value", "") - # Look at the section data and get an indentifier. The value of 'id' is either a unique ID + # Look at the section data and get an identifier. The value of 'id' is either a unique ID # or a human-readable string. If a 'label' field exists, prefer that since # it is the value visible in the 1Password UI when both 'id' and 'label' exist. 
section = field.get("section", {}) - current_section_title = section.get("label", section.get("id")) + section_title = _lower_if_possible(section_title) + + current_section_title = section.get("label", section.get("id", "")).lower() if section_title == current_section_title: # In the correct section. Check "label" then "id" for the desired field_name - if field.get("label") == field_name: - return field["value"] + if field.get("label", "").lower() == field_name: + return field.get("value", "") - if field.get("id") == field_name: - return field["value"] + if field.get("id", "").lower() == field_name: + return field.get("value", "") return "" def assert_logged_in(self): + if self.connect_host and self.connect_token: + return True + + if self.service_account_token: + args = ["whoami"] + environment_update = {"OP_SERVICE_ACCOUNT_TOKEN": self.service_account_token} + rc, out, err = self._run(args, environment_update=environment_update) + + return not bool(rc) + args = ["account", "list"] if self.subdomain: - account = "{subdomain}.{domain}".format(subdomain=self.subdomain, domain=self.domain) + account = f"{self.subdomain}.{self.domain}" args.extend(["--account", account]) rc, out, err = self._run(args) @@ -484,11 +511,13 @@ class OnePassCLIv2(OnePassCLIBase): # an interactive prompt. Only run 'op account get' after first listing accounts to see # if there are any previously configured accounts. 
args = ["account", "get"] - if self.subdomain: - account = "{subdomain}.{domain}".format(subdomain=self.subdomain, domain=self.domain) + if self.account_id: + args.extend(["--account", self.account_id]) + elif self.subdomain: + account = f"{self.subdomain}.{self.domain}" args.extend(["--account", account]) - rc, out, err = self._run(args) + rc, out, err = self._run(args, ignore_errors=True) return not bool(rc) @@ -505,7 +534,7 @@ class OnePassCLIv2(OnePassCLIBase): args = [ "account", "add", "--raw", - "--address", "{0}.{1}".format(self.subdomain, self.domain), + "--address", f"{self.subdomain}.{self.domain}", "--email", to_bytes(self.username), "--signin", ] @@ -513,15 +542,37 @@ class OnePassCLIv2(OnePassCLIBase): environment_update = {"OP_SECRET_KEY": self.secret_key} return self._run(args, command_input=to_bytes(self.master_password), environment_update=environment_update) - def get_raw(self, item_id, vault=None, token=None): - args = ["item", "get", item_id, "--format", "json"] + def _add_parameters_and_run(self, args, vault=None, token=None): + if self.account_id: + args.extend(["--account", self.account_id]) + if vault is not None: - args += ["--vault={0}".format(vault)] + args += [f"--vault={vault}"] + + if self.connect_host and self.connect_token: + if vault is None: + raise AnsibleLookupError("'vault' is required with 1Password Connect") + environment_update = { + "OP_CONNECT_HOST": self.connect_host, + "OP_CONNECT_TOKEN": self.connect_token, + } + return self._run(args, environment_update=environment_update) + + if self.service_account_token: + if vault is None: + raise AnsibleLookupError("'vault' is required with 'service_account_token'") + environment_update = {"OP_SERVICE_ACCOUNT_TOKEN": self.service_account_token} + return self._run(args, environment_update=environment_update) + if token is not None: args += [to_bytes("--session=") + token] return self._run(args) + def get_raw(self, item_id, vault=None, token=None): + args = ["item", "get", item_id, 
"--format", "json"] + return self._add_parameters_and_run(args, vault=vault, token=token) + def signin(self): self._check_required_params(['master_password']) @@ -533,29 +584,41 @@ class OnePassCLIv2(OnePassCLIBase): class OnePass(object): - def __init__(self, subdomain=None, domain="1password.com", username=None, secret_key=None, master_password=None): + def __init__(self, subdomain=None, domain="1password.com", username=None, secret_key=None, master_password=None, + service_account_token=None, account_id=None, connect_host=None, connect_token=None, cli_class=None): self.subdomain = subdomain self.domain = domain self.username = username self.secret_key = secret_key self.master_password = master_password + self.service_account_token = service_account_token + self.account_id = account_id + self.connect_host = connect_host + self.connect_token = connect_token self.logged_in = False self.token = None self._config = OnePasswordConfig() - self._cli = self._get_cli_class() + self._cli = self._get_cli_class(cli_class) + + if (self.connect_host or self.connect_token) and None in (self.connect_host, self.connect_token): + raise AnsibleOptionsError("connect_host and connect_token are required together") + + def _get_cli_class(self, cli_class=None): + if cli_class is not None: + return cli_class(self.subdomain, self.domain, self.username, self.secret_key, self.master_password, self.service_account_token) - def _get_cli_class(self): version = OnePassCLIBase.get_current_version() for cls in OnePassCLIBase.__subclasses__(): if cls.supports_version == version.split(".")[0]: try: - return cls(self.subdomain, self.domain, self.username, self.secret_key, self.master_password) + return cls(self.subdomain, self.domain, self.username, self.secret_key, self.master_password, self.service_account_token, + self.account_id, self.connect_host, self.connect_token) except TypeError as e: raise AnsibleLookupError(e) - raise AnsibleLookupError("op version %s is unsupported" % version) + raise 
AnsibleLookupError(f"op version {version} is unsupported") def set_token(self): if self._config.config_file_path and os.path.isfile(self._config.config_file_path): @@ -614,8 +677,22 @@ class LookupModule(LookupBase): username = self.get_option("username") secret_key = self.get_option("secret_key") master_password = self.get_option("master_password") + service_account_token = self.get_option("service_account_token") + account_id = self.get_option("account_id") + connect_host = self.get_option("connect_host") + connect_token = self.get_option("connect_token") - op = OnePass(subdomain, domain, username, secret_key, master_password) + op = OnePass( + subdomain=subdomain, + domain=domain, + username=username, + secret_key=secret_key, + master_password=master_password, + service_account_token=service_account_token, + account_id=account_id, + connect_host=connect_host, + connect_token=connect_token, + ) op.assert_logged_in() values = [] diff --git a/plugins/lookup/onepassword_doc.py b/plugins/lookup/onepassword_doc.py new file mode 100644 index 0000000000..e62db6d1e2 --- /dev/null +++ b/plugins/lookup/onepassword_doc.py @@ -0,0 +1,89 @@ +# Copyright (c) 2023, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +name: onepassword_doc +author: + - Sam Doran (@samdoran) +requirements: + - C(op) 1Password command line utility version 2 or later. +short_description: Fetch documents stored in 1Password +version_added: "8.1.0" +description: + - P(community.general.onepassword_doc#lookup) wraps C(op) command line utility to fetch one or more documents from 1Password. +notes: + - The document contents are a string exactly as stored in 1Password. + - This plugin requires C(op) version 2 or later. 
+options: + _terms: + description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve. + required: true + type: list + elements: string + +extends_documentation_fragment: + - community.general.onepassword + - community.general.onepassword.lookup +""" + +EXAMPLES = r""" +--- +- name: Retrieve a private key from 1Password + ansible.builtin.debug: + var: lookup('community.general.onepassword_doc', 'Private key') +""" + +RETURN = r""" +_raw: + description: Requested document. + type: list + elements: string +""" + +from ansible_collections.community.general.plugins.lookup.onepassword import OnePass, OnePassCLIv2 +from ansible.plugins.lookup import LookupBase + + +class OnePassCLIv2Doc(OnePassCLIv2): + def get_raw(self, item_id, vault=None, token=None): + args = ["document", "get", item_id] + return self._add_parameters_and_run(args, vault=vault, token=token) + + +class LookupModule(LookupBase): + def run(self, terms, variables=None, **kwargs): + self.set_options(var_options=variables, direct=kwargs) + + vault = self.get_option("vault") + subdomain = self.get_option("subdomain") + domain = self.get_option("domain", "1password.com") + username = self.get_option("username") + secret_key = self.get_option("secret_key") + master_password = self.get_option("master_password") + service_account_token = self.get_option("service_account_token") + account_id = self.get_option("account_id") + connect_host = self.get_option("connect_host") + connect_token = self.get_option("connect_token") + + op = OnePass( + subdomain=subdomain, + domain=domain, + username=username, + secret_key=secret_key, + master_password=master_password, + service_account_token=service_account_token, + account_id=account_id, + connect_host=connect_host, + connect_token=connect_token, + cli_class=OnePassCLIv2Doc, + ) + op.assert_logged_in() + + values = [] + for term in terms: + values.append(op.get_raw(term, vault)) + + return values diff --git a/plugins/lookup/onepassword_raw.py 
b/plugins/lookup/onepassword_raw.py index 9b87a3f619..b75be3d630 100644 --- a/plugins/lookup/onepassword_raw.py +++ b/plugins/lookup/onepassword_raw.py @@ -1,61 +1,41 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Scott Buchanan # Copyright (c) 2016, Andrew Zenk (lastpass.py used as starting point) # Copyright (c) 2018, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: onepassword_raw - author: - - Scott Buchanan (@scottsb) - - Andrew Zenk (@azenk) - - Sam Doran (@samdoran) - requirements: - - C(op) 1Password command line utility. See U(https://support.1password.com/command-line/) - short_description: fetch an entire item from 1Password - description: - - C(onepassword_raw) wraps C(op) command line utility to fetch an entire item from 1Password - options: - _terms: - description: identifier(s) (UUID, name, or domain; case-insensitive) of item(s) to retrieve. - required: true - master_password: - description: The password used to unlock the specified vault. - aliases: ['vault_password'] - section: - description: Item section containing the field to retrieve (case-insensitive). If absent will return first match from any section. - subdomain: - description: The 1Password subdomain to authenticate against. - domain: - description: Domain of 1Password. - version_added: 6.0.0 - default: '1password.com' - type: str - username: - description: The username used to sign in. - secret_key: - description: The secret key used when performing an initial sign in. - vault: - description: Vault containing the item to retrieve (case-insensitive). If absent will search all vaults. - notes: - - This lookup will use an existing 1Password session if one exists. 
If not, and you have already - performed an initial sign in (meaning C(~/.op/config exists)), then only the C(master_password) is required. - You may optionally specify C(subdomain) in this scenario, otherwise the last used subdomain will be used by C(op). - - This lookup can perform an initial login by providing C(subdomain), C(username), C(secret_key), and C(master_password). - - Due to the B(very) sensitive nature of these credentials, it is B(highly) recommended that you only pass in the minimal credentials - needed at any given time. Also, store these credentials in an Ansible Vault using a key that is equal to or greater in strength - to the 1Password master password. - - This lookup stores potentially sensitive data from 1Password as Ansible facts. - Facts are subject to caching if enabled, which means this data could be stored in clear text - on disk or in a database. - - Tested with C(op) version 2.7.0 -''' +DOCUMENTATION = r""" +name: onepassword_raw +author: + - Scott Buchanan (@scottsb) + - Andrew Zenk (@azenk) + - Sam Doran (@samdoran) +requirements: + - C(op) 1Password command line utility +short_description: Fetch an entire item from 1Password +description: + - P(community.general.onepassword_raw#lookup) wraps C(op) command line utility to fetch an entire item from 1Password. +options: + _terms: + description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve. 
+ required: true + type: list + elements: string + account_id: + version_added: 7.5.0 + domain: + version_added: 6.0.0 + service_account_token: + version_added: 7.1.0 +extends_documentation_fragment: + - community.general.onepassword + - community.general.onepassword.lookup +""" -EXAMPLES = """ +EXAMPLES = r""" +--- - name: Retrieve all data about Wintermute ansible.builtin.debug: var: lookup('community.general.onepassword_raw', 'Wintermute') @@ -65,11 +45,11 @@ EXAMPLES = """ var: lookup('community.general.onepassword_raw', 'Wintermute', subdomain='Turing', vault_password='DmbslfLvasjdl') """ -RETURN = """ - _raw: - description: field data requested - type: list - elements: dict +RETURN = r""" +_raw: + description: Entire item requested. + type: list + elements: dict """ import json @@ -89,8 +69,22 @@ class LookupModule(LookupBase): username = self.get_option("username") secret_key = self.get_option("secret_key") master_password = self.get_option("master_password") + service_account_token = self.get_option("service_account_token") + account_id = self.get_option("account_id") + connect_host = self.get_option("connect_host") + connect_token = self.get_option("connect_token") - op = OnePass(subdomain, domain, username, secret_key, master_password) + op = OnePass( + subdomain=subdomain, + domain=domain, + username=username, + secret_key=secret_key, + master_password=master_password, + service_account_token=service_account_token, + account_id=account_id, + connect_host=connect_host, + connect_token=connect_token, + ) op.assert_logged_in() values = [] diff --git a/plugins/lookup/onepassword_ssh_key.py b/plugins/lookup/onepassword_ssh_key.py new file mode 100644 index 0000000000..35e3034e04 --- /dev/null +++ b/plugins/lookup/onepassword_ssh_key.py @@ -0,0 +1,118 @@ +# Copyright (c) 2025, Ansible Project +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from 
__future__ import annotations + +DOCUMENTATION = r""" +name: onepassword_ssh_key +author: + - Mohammed Babelly (@mohammedbabelly20) +requirements: + - C(op) 1Password command line utility version 2 or later. +short_description: Fetch SSH keys stored in 1Password +version_added: "10.3.0" +description: + - P(community.general.onepassword_ssh_key#lookup) wraps C(op) command line utility to fetch SSH keys from 1Password. +notes: + - By default, it returns the private key value in PKCS#8 format, unless O(ssh_format=true) is passed. + - The plugin works only for C(SSHKEY) type items. + - This plugin requires C(op) version 2 or later. +options: + _terms: + description: Identifier(s) (case-insensitive UUID or name) of item(s) to retrieve. + required: true + type: list + elements: string + ssh_format: + description: Output key in SSH format if V(true). Otherwise, outputs in the default format (PKCS#8). + default: false + type: bool + +extends_documentation_fragment: + - community.general.onepassword + - community.general.onepassword.lookup +""" + +EXAMPLES = r""" +--- +- name: Retrieve the private SSH key from 1Password + ansible.builtin.debug: + msg: "{{ lookup('community.general.onepassword_ssh_key', 'SSH Key', ssh_format=true) }}" +""" + +RETURN = r""" +_raw: + description: Private key of SSH keypair.
+ type: list + elements: string +""" +import json + +from ansible_collections.community.general.plugins.lookup.onepassword import ( + OnePass, + OnePassCLIv2, +) +from ansible.errors import AnsibleLookupError +from ansible.plugins.lookup import LookupBase + + +class LookupModule(LookupBase): + def get_ssh_key(self, out, item_id, ssh_format=False): + data = json.loads(out) + + if data.get("category") != "SSH_KEY": + raise AnsibleLookupError(f"Item {item_id} is not an SSH key") + + private_key_field = next( + ( + field + for field in data.get("fields", {}) + if field.get("id") == "private_key" and field.get("type") == "SSHKEY" + ), + None, + ) + if not private_key_field: + raise AnsibleLookupError(f"No private key found for item {item_id}.") + + if ssh_format: + return ( + private_key_field.get("ssh_formats", {}) + .get("openssh", {}) + .get("value", "") + ) + return private_key_field.get("value", "") + + def run(self, terms, variables=None, **kwargs): + self.set_options(var_options=variables, direct=kwargs) + + ssh_format = self.get_option("ssh_format") + vault = self.get_option("vault") + subdomain = self.get_option("subdomain") + domain = self.get_option("domain", "1password.com") + username = self.get_option("username") + secret_key = self.get_option("secret_key") + master_password = self.get_option("master_password") + service_account_token = self.get_option("service_account_token") + account_id = self.get_option("account_id") + connect_host = self.get_option("connect_host") + connect_token = self.get_option("connect_token") + + op = OnePass( + subdomain=subdomain, + domain=domain, + username=username, + secret_key=secret_key, + master_password=master_password, + service_account_token=service_account_token, + account_id=account_id, + connect_host=connect_host, + connect_token=connect_token, + cli_class=OnePassCLIv2, + ) + op.assert_logged_in() + + return [ + self.get_ssh_key(op.get_raw(term, vault), term, ssh_format=ssh_format) + for term in terms + ] diff --git 
a/plugins/lookup/passwordstore.py b/plugins/lookup/passwordstore.py index 6face16f39..31305d81bb 100644 --- a/plugins/lookup/passwordstore.py +++ b/plugins/lookup/passwordstore.py @@ -1,144 +1,172 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017, Patrick Deelman # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: passwordstore - author: - - Patrick Deelman (!UNKNOWN) - short_description: manage passwords with passwordstore.org's pass utility +DOCUMENTATION = r""" +name: passwordstore +author: + - Patrick Deelman (!UNKNOWN) +short_description: Manage passwords with passwordstore.org's pass utility +description: + - Enables Ansible to retrieve, create or update passwords from the passwordstore.org pass utility. It can also retrieve, + create or update YAML style keys stored as multilines in the passwordfile. + - To avoid problems when accessing multiple secrets at once, add C(auto-expand-secmem) to C(~/.gnupg/gpg-agent.conf). Where + this is not possible, consider using O(lock=readwrite) instead. +options: + _terms: + description: Query key. + required: true + directory: description: - - Enables Ansible to retrieve, create or update passwords from the passwordstore.org pass utility. - It also retrieves YAML style keys stored as multilines in the passwordfile. - - To avoid problems when accessing multiple secrets at once, add C(auto-expand-secmem) to - C(~/.gnupg/gpg-agent.conf). Where this is not possible, consider using I(lock=readwrite) instead. - options: - _terms: - description: query key. - required: true - directory: - description: - - The directory of the password store. - - If I(backend=pass), the default is C(~/.password-store) is used. 
- - If I(backend=gopass), then the default is the C(path) field in C(~/.config/gopass/config.yml), - falling back to C(~/.local/share/gopass/stores/root) if C(path) is not defined in the gopass config. - type: path - vars: - - name: passwordstore - env: - - name: PASSWORD_STORE_DIR - create: - description: Create the password if it does not already exist. Takes precedence over C(missing). - type: bool - default: false - overwrite: - description: Overwrite the password if it does already exist. - type: bool - default: false - umask: - description: - - Sets the umask for the created .gpg files. The first octed must be greater than 3 (user readable). - - Note pass' default value is C('077'). - env: - - name: PASSWORD_STORE_UMASK - version_added: 1.3.0 - returnall: - description: Return all the content of the password, not only the first line. - type: bool - default: false - subkey: - description: Return a specific subkey of the password. When set to C(password), always returns the first line. - type: str - default: password - userpass: - description: Specify a password to save, instead of a generated one. - type: str - length: - description: The length of the generated password. - type: integer - default: 16 - backup: - description: Used with C(overwrite=true). Backup the previous password in a subkey. - type: bool - default: false - nosymbols: - description: Use alphanumeric characters. - type: bool - default: false - missing: - description: - - List of preference about what to do if the password file is missing. - - If I(create=true), the value for this option is ignored and assumed to be C(create). - - If set to C(error), the lookup will error out if the passname does not exist. - - If set to C(create), the passname will be created with the provided length I(length) if it does not exist. - - If set to C(empty) or C(warn), will return a C(none) in case the passname does not exist. - When using C(lookup) and not C(query), this will be translated to an empty string. 
- version_added: 3.1.0 - type: str - default: error - choices: - - error - - warn - - empty - - create - lock: - description: - - How to synchronize operations. - - The default of C(write) only synchronizes write operations. - - C(readwrite) synchronizes all operations (including read). This makes sure that gpg-agent is never called in parallel. - - C(none) does not do any synchronization. - ini: - - section: passwordstore_lookup - key: lock - type: str - default: write - choices: - - readwrite - - write - - none - version_added: 4.5.0 - locktimeout: - description: - - Lock timeout applied when I(lock) is not C(none). - - Time with a unit suffix, C(s), C(m), C(h) for seconds, minutes, and hours, respectively. For example, C(900s) equals C(15m). - - Correlates with C(pinentry-timeout) in C(~/.gnupg/gpg-agent.conf), see C(man gpg-agent) for details. - ini: - - section: passwordstore_lookup - key: locktimeout - type: str - default: 15m - version_added: 4.5.0 - backend: - description: - - Specify which backend to use. - - Defaults to C(pass), passwordstore.org's original pass utility. - - C(gopass) support is incomplete. - ini: - - section: passwordstore_lookup - key: backend - vars: - - name: passwordstore_backend - type: str - default: pass - choices: - - pass - - gopass - version_added: 5.2.0 - notes: - - The lookup supports passing all options as lookup parameters since community.general 6.0.0. -''' -EXAMPLES = """ + - The directory of the password store. + - If O(backend=pass), the default is V(~/.password-store) is used. + - If O(backend=gopass), then the default is the C(path) field in C(~/.config/gopass/config.yml), falling back to V(~/.local/share/gopass/stores/root) + if C(path) is not defined in the gopass config. + type: path + vars: + - name: passwordstore + env: + - name: PASSWORD_STORE_DIR + create: + description: Create the password or the subkey if it does not already exist. Takes precedence over O(missing). 
+ type: bool + default: false + overwrite: + description: Overwrite the password or the subkey if it does already exist. + type: bool + default: false + umask: + description: + - Sets the umask for the created V(.gpg) files. The first octet must be greater than 3 (user readable). + - Note pass' default value is V('077'). + type: string + env: + - name: PASSWORD_STORE_UMASK + version_added: 1.3.0 + returnall: + description: Return all the content of the password, not only the first line. + type: bool + default: false + subkey: + description: + - By default return a specific subkey of the password. When set to V(password), always returns the first line. + - With O(overwrite=true), it creates the subkey and returns it. + type: str + default: password + userpass: + description: Specify a password to save, instead of a generated one. + type: str + length: + description: The length of the generated password. + type: integer + default: 16 + backup: + description: Used with O(overwrite=true). Backup the previous password or subkey in a subkey. + type: bool + default: false + nosymbols: + description: Use alphanumeric characters. + type: bool + default: false + missing: + description: + - List of preference about what to do if the password file is missing. + - If O(create=true), the value for this option is ignored and assumed to be V(create). + - If set to V(error), the lookup fails out if the passname does not exist. + - If set to V(create), the passname is created with the provided length O(length) if it does not exist. + - If set to V(empty) or V(warn), it returns a V(none) in case the passname does not exist. When using C(lookup) and + not C(query), this is translated to an empty string. + version_added: 3.1.0 + type: str + default: error + choices: + - error + - warn + - empty + - create + lock: + description: + - How to synchronize operations. + - The default of V(write) only synchronizes write operations.
+ - V(readwrite) synchronizes all operations (including read). This makes sure that gpg-agent is never called in parallel. + - V(none) does not do any synchronization. + ini: + - section: passwordstore_lookup + key: lock + type: str + default: write + choices: + - readwrite + - write + - none + version_added: 4.5.0 + locktimeout: + description: + - Lock timeout applied when O(lock) is not V(none). + - Time with a unit suffix, V(s), V(m), V(h) for seconds, minutes, and hours, respectively. For example, V(900s) equals + V(15m). + - Correlates with C(pinentry-timeout) in C(~/.gnupg/gpg-agent.conf), see C(man gpg-agent) for details. + ini: + - section: passwordstore_lookup + key: locktimeout + type: str + default: 15m + version_added: 4.5.0 + backend: + description: + - Specify which backend to use. + - Defaults to V(pass), passwordstore.org's original pass utility. + - V(gopass) support is incomplete. + ini: + - section: passwordstore_lookup + key: backend + vars: + - name: passwordstore_backend + type: str + default: pass + choices: + - pass + - gopass + version_added: 5.2.0 + timestamp: + description: Add the password generation information to the end of the file. + type: bool + default: true + version_added: 8.1.0 + preserve: + description: Include the old (edited) password inside the pass file. + type: bool + default: true + version_added: 8.1.0 + missing_subkey: + description: + - Preference about what to do if the password subkey is missing. + - If set to V(error), the lookup fails out if the subkey does not exist. + - If set to V(empty) or V(warn), it returns a V(none) in case the subkey does not exist. + version_added: 8.6.0 + type: str + default: empty + choices: + - error + - warn + - empty + ini: + - section: passwordstore_lookup + key: missing_subkey +notes: + - The lookup supports passing all options as lookup parameters since community.general 6.0.0. 
+""" +EXAMPLES = r""" ansible.cfg: | [passwordstore_lookup] lock=readwrite locktimeout=45s + missing_subkey=warn -tasks.yml: | +tasks.yml: |- --- # Debug is used for examples, BAD IDEA to show passwords on screen @@ -162,6 +190,17 @@ tasks.yml: | vars: mypassword: "{{ lookup('community.general.passwordstore', 'example/test', missing='create')}}" + - name: >- + Create a random 16 character password in a subkey. If the password file already exists, just add the subkey in it. + If the subkey exists, returns it + ansible.builtin.debug: + msg: "{{ lookup('community.general.passwordstore', 'example/test', create=true, subkey='foo') }}" + + - name: >- + Create a random 16 character password in a subkey. Overwrite if it already exists and backup the old one. + ansible.builtin.debug: + msg: "{{ lookup('community.general.passwordstore', 'example/test', create=true, subkey='user', overwrite=true, backup=true) }}" + - name: Prints 'abc' if example/test does not exist, just give the password otherwise ansible.builtin.debug: var: mypassword @@ -193,10 +232,10 @@ tasks.yml: | passfilecontent: "{{ lookup('community.general.passwordstore', 'example/test', returnall=true)}}" """ -RETURN = """ +RETURN = r""" _raw: description: - - a password + - A password. 
type: list elements: str """ @@ -209,7 +248,6 @@ import time import yaml from ansible.errors import AnsibleError, AnsibleAssertionError -from ansible.module_utils.common.file import FileLock from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text from ansible.module_utils.parsing.convert_bool import boolean from ansible.utils.display import Display @@ -217,6 +255,8 @@ from ansible.utils.encrypt import random_password from ansible.plugins.lookup import LookupBase from ansible import constants as C +from ansible_collections.community.general.plugins.module_utils._filelock import FileLock + display = Display() @@ -274,7 +314,7 @@ class LookupModule(LookupBase): ) self.realpass = 'pass: the standard unix password manager' in passoutput except (subprocess.CalledProcessError) as e: - raise AnsibleError('exit code {0} while running {1}. Error output: {2}'.format(e.returncode, e.cmd, e.output)) + raise AnsibleError(f'exit code {e.returncode} while running {e.cmd}. Error output: {e.output}') return self.realpass @@ -291,7 +331,7 @@ class LookupModule(LookupBase): for param in params[1:]: name, value = param.split('=', 1) if name not in self.paramvals: - raise AnsibleAssertionError('%s not in paramvals' % name) + raise AnsibleAssertionError(f'{name} not in paramvals') self.paramvals[name] = value except (ValueError, AssertionError) as e: raise AnsibleError(e) @@ -303,12 +343,12 @@ class LookupModule(LookupBase): except (ValueError, AssertionError) as e: raise AnsibleError(e) if self.paramvals['missing'] not in ['error', 'warn', 'create', 'empty']: - raise AnsibleError("{0} is not a valid option for missing".format(self.paramvals['missing'])) + raise AnsibleError(f"{self.paramvals['missing']} is not a valid option for missing") if not isinstance(self.paramvals['length'], int): if self.paramvals['length'].isdigit(): self.paramvals['length'] = int(self.paramvals['length']) else: - raise AnsibleError("{0} is not a correct value for 
length".format(self.paramvals['length'])) + raise AnsibleError(f"{self.paramvals['length']} is not a correct value for length") if self.paramvals['create']: self.paramvals['missing'] = 'create' @@ -323,7 +363,7 @@ class LookupModule(LookupBase): # Set PASSWORD_STORE_DIR self.env['PASSWORD_STORE_DIR'] = self.paramvals['directory'] elif self.is_real_pass(): - raise AnsibleError('Passwordstore directory \'{0}\' does not exist'.format(self.paramvals['directory'])) + raise AnsibleError(f"Passwordstore directory '{self.paramvals['directory']}' does not exist") # Set PASSWORD_STORE_UMASK if umask is set if self.paramvals.get('umask') is not None: @@ -353,19 +393,19 @@ class LookupModule(LookupBase): name, value = line.split(':', 1) self.passdict[name.strip()] = value.strip() if (self.backend == 'gopass' or - os.path.isfile(os.path.join(self.paramvals['directory'], self.passname + ".gpg")) + os.path.isfile(os.path.join(self.paramvals['directory'], f"{self.passname}.gpg")) or not self.is_real_pass()): # When using real pass, only accept password as found if there is a .gpg file for it (might be a tree node otherwise) return True except (subprocess.CalledProcessError) as e: # 'not in password store' is the expected error if a password wasn't found if 'not in the password store' not in e.output: - raise AnsibleError('exit code {0} while running {1}. Error output: {2}'.format(e.returncode, e.cmd, e.output)) + raise AnsibleError(f'exit code {e.returncode} while running {e.cmd}. 
Error output: {e.output}') if self.paramvals['missing'] == 'error': - raise AnsibleError('passwordstore: passname {0} not found and missing=error is set'.format(self.passname)) + raise AnsibleError(f'passwordstore: passname {self.passname} not found and missing=error is set') elif self.paramvals['missing'] == 'warn': - display.warning('passwordstore: passname {0} not found'.format(self.passname)) + display.warning(f'passwordstore: passname {self.passname} not found') return False @@ -383,17 +423,51 @@ class LookupModule(LookupBase): def update_password(self): # generate new password, insert old lines from current result and return new password + # if the target is a subkey, only modify the subkey newpass = self.get_newpass() datetime = time.strftime("%d/%m/%Y %H:%M:%S") - msg = newpass + '\n' - if self.passoutput[1:]: - msg += '\n'.join(self.passoutput[1:]) + '\n' - if self.paramvals['backup']: - msg += "lookup_pass: old password was {0} (Updated on {1})\n".format(self.password, datetime) + subkey = self.paramvals["subkey"] + + if subkey != "password": + + msg_lines = [] + subkey_exists = False + subkey_line = f"{subkey}: {newpass}" + oldpass = None + + for line in self.passoutput: + if line.startswith(f"{subkey}: "): + oldpass = self.passdict[subkey] + line = subkey_line + subkey_exists = True + + msg_lines.append(line) + + if not subkey_exists: + msg_lines.insert(2, subkey_line) + + if self.paramvals["timestamp"] and self.paramvals["backup"] and oldpass and oldpass != newpass: + msg_lines.append( + f"lookup_pass: old subkey '{subkey}' password was {oldpass} (Updated on {datetime})\n" + ) + + msg = os.linesep.join(msg_lines) + + else: + msg = newpass + + if self.paramvals['preserve'] or self.paramvals['timestamp']: + msg += '\n' + if self.paramvals['preserve'] and self.passoutput[1:]: + msg += '\n'.join(self.passoutput[1:]) + msg += '\n' + if self.paramvals['timestamp'] and self.paramvals['backup']: + msg += f"lookup_pass: old password was {self.password} (Updated 
on {datetime})\n" + try: check_output2([self.pass_cmd, 'insert', '-f', '-m', self.passname], input=msg, env=self.env) except (subprocess.CalledProcessError) as e: - raise AnsibleError('exit code {0} while running {1}. Error output: {2}'.format(e.returncode, e.cmd, e.output)) + raise AnsibleError(f'exit code {e.returncode} while running {e.cmd}. Error output: {e.output}') return newpass def generate_password(self): @@ -401,11 +475,21 @@ class LookupModule(LookupBase): # use pwgen to generate the password and insert values with pass -m newpass = self.get_newpass() datetime = time.strftime("%d/%m/%Y %H:%M:%S") - msg = newpass + '\n' + "lookup_pass: First generated by ansible on {0}\n".format(datetime) + subkey = self.paramvals["subkey"] + + if subkey != "password": + msg = f"\n\n{subkey}: {newpass}" + else: + msg = newpass + + if self.paramvals['timestamp']: + msg += f"\nlookup_pass: First generated by ansible on {datetime}\n" + try: check_output2([self.pass_cmd, 'insert', '-f', '-m', self.passname], input=msg, env=self.env) except (subprocess.CalledProcessError) as e: - raise AnsibleError('exit code {0} while running {1}. Error output: {2}'.format(e.returncode, e.cmd, e.output)) + raise AnsibleError(f'exit code {e.returncode} while running {e.cmd}. 
Error output: {e.output}') + return newpass def get_passresult(self): @@ -417,13 +501,24 @@ class LookupModule(LookupBase): if self.paramvals['subkey'] in self.passdict: return self.passdict[self.paramvals['subkey']] else: + if self.paramvals["missing_subkey"] == "error": + raise AnsibleError( + f"passwordstore: subkey {self.paramvals['subkey']} for passname {self.passname} not found and missing_subkey=error is set" + ) + + if self.paramvals["missing_subkey"] == "warn": + display.warning( + f"passwordstore: subkey {self.paramvals['subkey']} for passname {self.passname} not found" + ) + return None @contextmanager def opt_lock(self, type): if self.get_option('lock') == type: tmpdir = os.environ.get('TMPDIR', '/tmp') - lockfile = os.path.join(tmpdir, '.passwordstore.lock') + user = os.environ.get('USER') + lockfile = os.path.join(tmpdir, f'.{user}.passwordstore.lock') with FileLock().lock_file(lockfile, tmpdir, self.lock_timeout): self.locked = type yield @@ -437,7 +532,7 @@ class LookupModule(LookupBase): self.locked = None timeout = self.get_option('locktimeout') if not re.match('^[0-9]+[smh]$', timeout): - raise AnsibleError("{0} is not a correct value for locktimeout".format(timeout)) + raise AnsibleError(f"{timeout} is not a correct value for locktimeout") unit_to_seconds = {"s": 1, "m": 60, "h": 3600} self.lock_timeout = int(timeout[:-1]) * unit_to_seconds[timeout[-1]] @@ -464,6 +559,9 @@ class LookupModule(LookupBase): 'backup': self.get_option('backup'), 'missing': self.get_option('missing'), 'umask': self.get_option('umask'), + 'timestamp': self.get_option('timestamp'), + 'preserve': self.get_option('preserve'), + "missing_subkey": self.get_option("missing_subkey"), } def run(self, terms, variables, **kwargs): @@ -474,13 +572,20 @@ class LookupModule(LookupBase): for term in terms: self.parse_params(term) # parse the input into paramvals with self.opt_lock('readwrite'): - if self.check_pass(): # password exists - if self.paramvals['overwrite'] and 
self.paramvals['subkey'] == 'password': + if self.check_pass(): # password file exists + if self.paramvals['overwrite']: # if "overwrite", always update password + with self.opt_lock('write'): + result.append(self.update_password()) + elif ( + self.paramvals["subkey"] != "password" + and not self.passdict.get(self.paramvals["subkey"]) + and self.paramvals["missing"] == "create" + ): # target is a subkey, this subkey is not in passdict BUT missing == create with self.opt_lock('write'): result.append(self.update_password()) else: result.append(self.get_passresult()) - else: # password does not exist + else: # password does not exist if self.paramvals['missing'] == 'create': with self.opt_lock('write'): if self.locked == 'write' and self.check_pass(): # lookup password again if under write lock diff --git a/plugins/lookup/random_pet.py b/plugins/lookup/random_pet.py index 71a62cbca0..0ab3ee29d3 100644 --- a/plugins/lookup/random_pet.py +++ b/plugins/lookup/random_pet.py @@ -1,45 +1,43 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2021, Abhijeet Kasurde # Copyright (c) 2018, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = r''' - name: random_pet - author: - - Abhijeet Kasurde (@Akasurde) - short_description: Generates random pet names - version_added: '3.1.0' - requirements: - - petname U(https://github.com/dustinkirkland/python-petname) +DOCUMENTATION = r""" +name: random_pet +author: + - Abhijeet Kasurde (@Akasurde) +short_description: Generates random pet names +version_added: '3.1.0' +requirements: + - petname U(https://github.com/dustinkirkland/python-petname) +description: + - Generates random pet names that can be used as unique identifiers for the resources. 
+options: + words: description: - - Generates random pet names that can be used as unique identifiers for the resources. - options: - words: - description: - - The number of words in the pet name. - default: 2 - type: int - length: - description: - - The maximal length of every component of the pet name. - - Values below 3 will be set to 3 by petname. - default: 6 - type: int - prefix: - description: A string to prefix with the name. - type: str - separator: - description: The character to separate words in the pet name. - default: "-" - type: str -''' + - The number of words in the pet name. + default: 2 + type: int + length: + description: + - The maximal length of every component of the pet name. + - Values below V(3) are set to V(3) by petname. + default: 6 + type: int + prefix: + description: A string to prefix with the name. + type: str + separator: + description: The character to separate words in the pet name. + default: "-" + type: str +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Generate pet name ansible.builtin.debug: var: lookup('community.general.random_pet') @@ -59,14 +57,14 @@ EXAMPLES = r''' ansible.builtin.debug: var: lookup('community.general.random_pet', length=7) # Example result: 'natural-peacock' -''' +""" -RETURN = r''' - _raw: - description: A one-element list containing a random pet name - type: list - elements: str -''' +RETURN = r""" +_raw: + description: A one-element list containing a random pet name. 
+ type: list + elements: str +""" try: import petname @@ -95,6 +93,6 @@ class LookupModule(LookupBase): values = petname.Generate(words=words, separator=separator, letters=length) if prefix: - values = "%s%s%s" % (prefix, separator, values) + values = f"{prefix}{separator}{values}" return [values] diff --git a/plugins/lookup/random_string.py b/plugins/lookup/random_string.py index 199aa13964..027a587ad8 100644 --- a/plugins/lookup/random_string.py +++ b/plugins/lookup/random_string.py @@ -1,125 +1,157 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2021, Abhijeet Kasurde # Copyright (c) 2018, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type DOCUMENTATION = r""" - name: random_string - author: - - Abhijeet Kasurde (@Akasurde) - short_description: Generates random string - version_added: '3.2.0' +name: random_string +author: + - Abhijeet Kasurde (@Akasurde) +short_description: Generates random string +version_added: '3.2.0' +description: + - Generates random string based upon the given constraints. + - Uses L(secrets.SystemRandom,https://docs.python.org/3/library/secrets.html#secrets.SystemRandom), so should be strong enough + for cryptographic purposes. +options: + length: + description: The length of the string. + default: 8 + type: int + upper: description: - - Generates random string based upon the given constraints. - options: - length: - description: The length of the string. - default: 8 - type: int - upper: - description: - - Include uppercase letters in the string. - default: true - type: bool - lower: - description: - - Include lowercase letters in the string. - default: true - type: bool - numbers: - description: - - Include numbers in the string. 
- default: true - type: bool - special: - description: - - Include special characters in the string. - - Special characters are taken from Python standard library C(string). - See L(the documentation of string.punctuation,https://docs.python.org/3/library/string.html#string.punctuation) - for which characters will be used. - - The choice of special characters can be changed to setting I(override_special). - default: true - type: bool - min_numeric: - description: - - Minimum number of numeric characters in the string. - - If set, overrides I(numbers=false). - default: 0 - type: int - min_upper: - description: - - Minimum number of uppercase alphabets in the string. - - If set, overrides I(upper=false). - default: 0 - type: int - min_lower: - description: - - Minimum number of lowercase alphabets in the string. - - If set, overrides I(lower=false). - default: 0 - type: int - min_special: - description: - - Minimum number of special character in the string. - default: 0 - type: int - override_special: - description: - - Overide a list of special characters to use in the string. - - If set I(min_special) should be set to a non-default value. - type: str - override_all: - description: - - Override all values of I(numbers), I(upper), I(lower), and I(special) with - the given list of characters. - type: str - base64: - description: - - Returns base64 encoded string. - type: bool - default: false + - Possibly include uppercase letters in the string. + - To ensure at least one uppercase letter, set O(min_upper) to V(1). + default: true + type: bool + lower: + description: + - Possibly include lowercase letters in the string. + - To ensure at least one lowercase letter, set O(min_lower) to V(1). + default: true + type: bool + numbers: + description: + - Possibly include numbers in the string. + - To ensure at least one numeric character, set O(min_numeric) to V(1). + default: true + type: bool + special: + description: + - Possibly include special characters in the string.
+ - Special characters are taken from Python standard library C(string). See L(the documentation of
+ string.punctuation,https://docs.python.org/3/library/string.html#string.punctuation)
+ for which characters are used.
+ - The choice of special characters can be changed by setting O(override_special).
+ - To ensure at least one special character, set O(min_special) to V(1).
+ default: true
+ type: bool
+ min_numeric:
+ description:
+ - Minimum number of numeric characters in the string.
+ - If set, overrides O(numbers=false).
+ default: 0
+ type: int
+ min_upper:
+ description:
+ - Minimum number of uppercase alphabets in the string.
+ - If set, overrides O(upper=false).
+ default: 0
+ type: int
+ min_lower:
+ description:
+ - Minimum number of lowercase alphabets in the string.
+ - If set, overrides O(lower=false).
+ default: 0
+ type: int
+ min_special:
+ description:
+ - Minimum number of special character in the string.
+ default: 0
+ type: int
+ override_special:
+ description:
+ - Override a list of special characters to use in the string.
+ - If set O(min_special) should be set to a non-default value.
+ type: str
+ override_all:
+ description:
+ - Override all values of O(numbers), O(upper), O(lower), and O(special) with the given list of characters.
+ type: str
+ ignore_similar_chars:
+ description:
+ - Ignore similar characters, such as V(l) and V(1), or V(O) and V(0).
+ - These characters can be configured in O(similar_chars).
+ default: false
+ type: bool
+ version_added: 7.5.0
+ similar_chars:
+ description:
+ - Override a list of characters not to be used in the string.
+ default: "il1LoO0"
+ type: str
+ version_added: 7.5.0
+ base64:
+ description:
+ - Returns base64 encoded string.
+ type: bool
+ default: false
+ seed:
+ description:
+ - Seed for random string generator.
+ - B(Note) that this drastically reduces the security of this plugin. First, when O(seed) is provided, a non-cryptographic random number generator is used. 
+ Second, if the seed does not contain enough entropy, the generated string is weak. + B(Do not use the generated string as a password or a secure token when using this option!) + type: str + version_added: 11.3.0 """ EXAMPLES = r""" - name: Generate random string ansible.builtin.debug: var: lookup('community.general.random_string') - # Example result: ['DeadBeeF'] + # Example result: 'DeadBeeF' + +- name: Generate random string with seed + ansible.builtin.debug: + var: lookup('community.general.random_string', seed=12345) + # Example result: '6[~(2q5O' + # NOTE: Do **not** use this string as a password or a secure token, + # unless you know exactly what you are doing! + # Specifying seed uses a non-secure random number generator. - name: Generate random string with length 12 ansible.builtin.debug: var: lookup('community.general.random_string', length=12) - # Example result: ['Uan0hUiX5kVG'] + # Example result: 'Uan0hUiX5kVG' - name: Generate base64 encoded random string ansible.builtin.debug: var: lookup('community.general.random_string', base64=True) - # Example result: ['NHZ6eWN5Qk0='] + # Example result: 'NHZ6eWN5Qk0=' -- name: Generate a random string with 1 lower, 1 upper, 1 number and 1 special char (atleast) +- name: Generate a random string with 1 lower, 1 upper, 1 number and 1 special char (at least) ansible.builtin.debug: var: lookup('community.general.random_string', min_lower=1, min_upper=1, min_special=1, min_numeric=1) - # Example result: ['&Qw2|E[-'] + # Example result: '&Qw2|E[-' - name: Generate a random string with all lower case characters - debug: + ansible.builtin.debug: var: query('community.general.random_string', upper=false, numbers=false, special=false) # Example result: ['exolxzyz'] - name: Generate random hexadecimal string - debug: + ansible.builtin.debug: var: query('community.general.random_string', upper=false, lower=false, override_special=hex_chars, numbers=false) vars: hex_chars: '0123456789ABCDEF' # Example result: ['D2A40737'] 
- name: Generate random hexadecimal string with override_all - debug: + ansible.builtin.debug: var: query('community.general.random_string', override_all=hex_chars) vars: hex_chars: '0123456789ABCDEF' @@ -127,14 +159,15 @@ EXAMPLES = r""" """ RETURN = r""" - _raw: - description: A one-element list containing a random string - type: list - elements: str +_raw: + description: A one-element list containing a random string. + type: list + elements: str """ import base64 import random +import secrets import string from ansible.errors import AnsibleLookupError @@ -164,16 +197,30 @@ class LookupModule(LookupBase): lower_chars = string.ascii_lowercase upper_chars = string.ascii_uppercase special_chars = string.punctuation - random_generator = random.SystemRandom() self.set_options(var_options=variables, direct=kwargs) length = self.get_option("length") base64_flag = self.get_option("base64") override_all = self.get_option("override_all") + ignore_similar_chars = self.get_option("ignore_similar_chars") + similar_chars = self.get_option("similar_chars") + seed = self.get_option("seed") + + if seed is None: + random_generator = secrets.SystemRandom() + else: + random_generator = random.Random(seed) + values = "" available_chars_set = "" + if ignore_similar_chars: + number_chars = "".join([sc for sc in number_chars if sc not in similar_chars]) + lower_chars = "".join([sc for sc in lower_chars if sc not in similar_chars]) + upper_chars = "".join([sc for sc in upper_chars if sc not in similar_chars]) + special_chars = "".join([sc for sc in special_chars if sc not in similar_chars]) + if override_all: # Override all the values available_chars_set = override_all @@ -210,10 +257,11 @@ class LookupModule(LookupBase): remaining_pass_len = length - len(values) values += self.get_random(random_generator, available_chars_set, remaining_pass_len) - # Get pseudo randomization shuffled_values = list(values) - # Randomize the order - random.shuffle(shuffled_values) + if seed is None: + # 
Get pseudo randomization + # Randomize the order + random.shuffle(shuffled_values) if base64_flag: return [self.b64encode("".join(shuffled_values))] diff --git a/plugins/lookup/random_words.py b/plugins/lookup/random_words.py index a4aa1b3178..dd06e701f8 100644 --- a/plugins/lookup/random_words.py +++ b/plugins/lookup/random_words.py @@ -1,53 +1,50 @@ -# -*- coding: utf-8 -*- # Copyright (c) Ansible project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later """The community.general.random_words Ansible lookup plugin.""" -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type DOCUMENTATION = r""" - name: random_words - author: - - Thomas Sjögren (@konstruktoid) - short_description: Return a number of random words - version_added: "4.0.0" - requirements: - - xkcdpass U(https://github.com/redacted/XKCD-password-generator) +name: random_words +author: + - Thomas Sjögren (@konstruktoid) +short_description: Return a number of random words +version_added: "4.0.0" +requirements: + - xkcdpass U(https://github.com/redacted/XKCD-password-generator) +description: + - Returns a number of random words. The output can for example be used for passwords. + - See U(https://xkcd.com/936/) for background. +options: + numwords: description: - - Returns a number of random words. The output can for example be used for - passwords. - - See U(https://xkcd.com/936/) for background. - options: - numwords: - description: - - The number of words. - default: 6 - type: int - min_length: - description: - - Minimum length of words to make password. - default: 5 - type: int - max_length: - description: - - Maximum length of words to make password. - default: 9 - type: int - delimiter: - description: - - The delimiter character between words. 
- default: " " - type: str - case: - description: - - The method for setting the case of each word in the passphrase. - choices: ["alternating", "upper", "lower", "random", "capitalize"] - default: "lower" - type: str + - The number of words. + default: 6 + type: int + min_length: + description: + - Minimum length of words to make password. + default: 5 + type: int + max_length: + description: + - Maximum length of words to make password. + default: 9 + type: int + delimiter: + description: + - The delimiter character between words. + default: " " + type: str + case: + description: + - The method for setting the case of each word in the passphrase. + choices: ["alternating", "upper", "lower", "random", "capitalize"] + default: "lower" + type: str """ EXAMPLES = r""" @@ -74,10 +71,10 @@ EXAMPLES = r""" """ RETURN = r""" - _raw: - description: A single-element list containing random words. - type: list - elements: str +_raw: + description: A single-element list containing random words. + type: list + elements: str """ from ansible.errors import AnsibleLookupError diff --git a/plugins/lookup/redis.py b/plugins/lookup/redis.py index 490751a398..0073796a22 100644 --- a/plugins/lookup/redis.py +++ b/plugins/lookup/redis.py @@ -1,52 +1,53 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2012, Jan-Piet Mens # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: redis - author: - - Jan-Piet Mens (@jpmens) - - Ansible Core Team - short_description: fetch data from Redis - description: - - This lookup returns a list of results from a Redis DB corresponding to a list of items given to it - requirements: - - redis (python library https://github.com/andymccurdy/redis-py/) - options: - _terms: 
- description: list of keys to query - host: - description: location of Redis host - default: '127.0.0.1' - env: - - name: ANSIBLE_REDIS_HOST - ini: - - section: lookup_redis - key: host - port: - description: port on which Redis is listening on - default: 6379 - type: int - env: - - name: ANSIBLE_REDIS_PORT - ini: - - section: lookup_redis - key: port - socket: - description: path to socket on which to query Redis, this option overrides host and port options when set. - type: path - env: - - name: ANSIBLE_REDIS_SOCKET - ini: - - section: lookup_redis - key: socket -''' +DOCUMENTATION = r""" +name: redis +author: + - Jan-Piet Mens (@jpmens) + - Ansible Core Team +short_description: Fetch data from Redis +description: + - This lookup returns a list of results from a Redis DB corresponding to a list of items given to it. +requirements: + - redis (python library https://github.com/andymccurdy/redis-py/) +options: + _terms: + description: List of keys to query. + type: list + elements: string + host: + description: Location of Redis host. + type: string + default: '127.0.0.1' + env: + - name: ANSIBLE_REDIS_HOST + ini: + - section: lookup_redis + key: host + port: + description: Port on which Redis is listening on. + default: 6379 + type: int + env: + - name: ANSIBLE_REDIS_PORT + ini: + - section: lookup_redis + key: port + socket: + description: Path to socket on which to query Redis, this option overrides host and port options when set. 
+ type: path + env: + - name: ANSIBLE_REDIS_SOCKET + ini: + - section: lookup_redis + key: socket +""" -EXAMPLES = """ +EXAMPLES = r""" - name: query redis for somekey (default or configured settings used) ansible.builtin.debug: msg: "{{ lookup('community.general.redis', 'somekey') }}" @@ -63,18 +64,15 @@ EXAMPLES = """ - name: use list directly with a socket ansible.builtin.debug: msg: "{{ lookup('community.general.redis', 'key1', 'key2', socket='/var/tmp/redis.sock') }}" - """ -RETURN = """ +RETURN = r""" _raw: - description: value(s) stored in Redis + description: Value(s) stored in Redis. type: list elements: str """ -import os - HAVE_REDIS = False try: import redis @@ -115,5 +113,5 @@ class LookupModule(LookupBase): ret.append(to_text(res)) except Exception as e: # connection failed or key not found - raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e)) + raise AnsibleError(f'Encountered exception while fetching {term}: {e}') return ret diff --git a/plugins/lookup/revbitspss.py b/plugins/lookup/revbitspss.py index 552970804e..86e3fbe38c 100644 --- a/plugins/lookup/revbitspss.py +++ b/plugins/lookup/revbitspss.py @@ -1,10 +1,8 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2021, RevBits # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type DOCUMENTATION = r""" name: revbitspss @@ -12,60 +10,60 @@ author: RevBits (@RevBits) short_description: Get secrets from RevBits PAM server version_added: 4.1.0 description: - - Uses the revbits_ansible Python SDK to get Secrets from RevBits PAM - Server using API key authentication with the REST API. + - Uses the revbits_ansible Python SDK to get Secrets from RevBits PAM Server using API key authentication with the REST + API. 
requirements: - - revbits_ansible - U(https://pypi.org/project/revbits_ansible/) + - revbits_ansible - U(https://pypi.org/project/revbits_ansible/) options: - _terms: - description: - - This will be an array of keys for secrets which you want to fetch from RevBits PAM. - required: true - type: list - elements: string - base_url: - description: - - This will be the base URL of the server, for example C(https://server-url-here). - required: true - type: string - api_key: - description: - - This will be the API key for authentication. You can get it from the RevBits PAM secret manager module. - required: true - type: string + _terms: + description: + - This is an array of keys for secrets which you want to fetch from RevBits PAM. + required: true + type: list + elements: string + base_url: + description: + - This is the base URL of the server, for example V(https://server-url-here). + required: true + type: string + api_key: + description: + - This is the API key for authentication. You can get it from the RevBits PAM secret manager module. + required: true + type: string """ RETURN = r""" _list: - description: - - The JSON responses which you can access with defined keys. - - If you are fetching secrets named as UUID, PASSWORD it will gives you the dict of all secrets. - type: list - elements: dict + description: + - The JSON responses which you can access with defined keys. + - If you are fetching secrets named as UUID, PASSWORD it returns the dict of all secrets. 
+ type: list + elements: dict """ EXAMPLES = r""" +--- - hosts: localhost vars: - secret: >- - {{ - lookup( - 'community.general.revbitspss', - 'UUIDPAM', 'DB_PASS', - base_url='https://server-url-here', - api_key='API_KEY_GOES_HERE' - ) - }} + secret: >- + {{ + lookup( + 'community.general.revbitspss', + 'UUIDPAM', 'DB_PASS', + base_url='https://server-url-here', + api_key='API_KEY_GOES_HERE' + ) + }} tasks: - - ansible.builtin.debug: - msg: > - UUIDPAM is {{ (secret['UUIDPAM']) }} and DB_PASS is {{ (secret['DB_PASS']) }} + - ansible.builtin.debug: + msg: >- + UUIDPAM is {{ (secret['UUIDPAM']) }} and DB_PASS is {{ (secret['DB_PASS']) }} """ from ansible.plugins.lookup import LookupBase from ansible.utils.display import Display from ansible.errors import AnsibleError -from ansible.module_utils.six import raise_from try: from pam.revbits_ansible.server import SecretServer @@ -86,10 +84,7 @@ class LookupModule(LookupBase): def run(self, terms, variables, **kwargs): if ANOTHER_LIBRARY_IMPORT_ERROR: - raise_from( - AnsibleError('revbits_ansible must be installed to use this plugin'), - ANOTHER_LIBRARY_IMPORT_ERROR - ) + raise AnsibleError('revbits_ansible must be installed to use this plugin') from ANOTHER_LIBRARY_IMPORT_ERROR self.set_options(var_options=variables, direct=kwargs) secret_server = LookupModule.Client( { @@ -100,8 +95,8 @@ class LookupModule(LookupBase): result = [] for term in terms: try: - display.vvv(u"Secret Server lookup of Secret with ID %s" % term) + display.vvv(f"Secret Server lookup of Secret with ID {term}") result.append({term: secret_server.get_pam_secret(term)}) except Exception as error: - raise AnsibleError("Secret Server lookup failure: %s" % error.message) + raise AnsibleError(f"Secret Server lookup failure: {error.message}") return result diff --git a/plugins/lookup/shelvefile.py b/plugins/lookup/shelvefile.py index 35f1097c8b..54d96e91d2 100644 --- a/plugins/lookup/shelvefile.py +++ b/plugins/lookup/shelvefile.py @@ -1,35 +1,38 @@ -# 
-*- coding: utf-8 -*- # Copyright (c) 2015, Alejandro Guirao # Copyright (c) 2012-17 Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' - name: shelvefile - author: Alejandro Guirao (!UNKNOWN) - short_description: read keys from Python shelve file - description: - - Read keys from Python shelve file. - options: - _terms: - description: Sets of key value pairs of parameters. - key: - description: Key to query. - required: true - file: - description: Path to shelve file. - required: true -''' +DOCUMENTATION = r""" +name: shelvefile +author: Alejandro Guirao (!UNKNOWN) +short_description: Read keys from Python shelve file +description: + - Read keys from Python shelve file. +options: + _terms: + description: Sets of key value pairs of parameters. + type: list + elements: str + key: + description: Key to query. + type: str + required: true + file: + description: Path to shelve file. + type: path + required: true +""" -EXAMPLES = """ +EXAMPLES = r""" +--- - name: Retrieve a string value corresponding to a key inside a Python shelve file ansible.builtin.debug: msg: "{{ lookup('community.general.shelvefile', 'file=path_to_some_shelve_file.db key=key_to_retrieve') }}" """ -RETURN = """ +RETURN = r""" _list: description: Value(s) of key(s) in shelve file(s). 
type: list @@ -67,7 +70,7 @@ class LookupModule(LookupBase): for param in params: name, value = param.split('=') if name not in paramvals: - raise AnsibleAssertionError('%s not in paramvals' % name) + raise AnsibleAssertionError(f'{name} not in paramvals') paramvals[name] = value except (ValueError, AssertionError) as e: @@ -82,11 +85,11 @@ class LookupModule(LookupBase): if shelvefile: res = self.read_shelve(shelvefile, key) if res is None: - raise AnsibleError("Key %s not found in shelve file %s" % (key, shelvefile)) + raise AnsibleError(f"Key {key} not found in shelve file {shelvefile}") # Convert the value read to string ret.append(to_text(res)) break else: - raise AnsibleError("Could not locate shelve file in lookup: %s" % paramvals['file']) + raise AnsibleError(f"Could not locate shelve file in lookup: {paramvals['file']}") return ret diff --git a/plugins/lookup/tss.py b/plugins/lookup/tss.py index 935b5f4b46..e612446374 100644 --- a/plugins/lookup/tss.py +++ b/plugins/lookup/tss.py @@ -1,10 +1,8 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2020, Adam Migus # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type DOCUMENTATION = r""" name: tss @@ -12,184 +10,273 @@ author: Adam Migus (@amigus) short_description: Get secrets from Thycotic Secret Server version_added: 1.0.0 description: - - Uses the Thycotic Secret Server Python SDK to get Secrets from Secret - Server using token authentication with I(username) and I(password) on - the REST API at I(base_url). - - When using self-signed certificates the environment variable - C(REQUESTS_CA_BUNDLE) can be set to a file containing the trusted certificates - (in C(.pem) format). - - For example, C(export REQUESTS_CA_BUNDLE='/etc/ssl/certs/ca-bundle.trust.crt'). 
+ - Uses the Thycotic Secret Server Python SDK to get Secrets from Secret Server using token authentication with O(username) + and O(password) on the REST API at O(base_url). + - When using self-signed certificates the environment variable E(REQUESTS_CA_BUNDLE) can be set to a file containing the + trusted certificates (in C(.pem) format). + - For example, C(export REQUESTS_CA_BUNDLE='/etc/ssl/certs/ca-bundle.trust.crt'). requirements: - - python-tss-sdk - https://pypi.org/project/python-tss-sdk/ + - python-tss-sdk - https://pypi.org/project/python-tss-sdk/ options: - _terms: - description: The integer ID of the secret. - required: true - type: int - base_url: - description: The base URL of the server, e.g. C(https://localhost/SecretServer). - env: - - name: TSS_BASE_URL - ini: - - section: tss_lookup - key: base_url - required: true - username: - description: The username with which to request the OAuth2 Access Grant. - env: - - name: TSS_USERNAME - ini: - - section: tss_lookup - key: username - password: - description: - - The password associated with the supplied username. - - Required when I(token) is not provided. - env: - - name: TSS_PASSWORD - ini: - - section: tss_lookup - key: password - domain: - default: "" - description: - - The domain with which to request the OAuth2 Access Grant. - - Optional when I(token) is not provided. - - Requires C(python-tss-sdk) version 1.0.0 or greater. - env: - - name: TSS_DOMAIN - ini: - - section: tss_lookup - key: domain - required: false - version_added: 3.6.0 - token: - description: - - Existing token for Thycotic authorizer. - - If provided, I(username) and I(password) are not needed. - - Requires C(python-tss-sdk) version 1.0.0 or greater. - env: - - name: TSS_TOKEN - ini: - - section: tss_lookup - key: token - version_added: 3.7.0 - api_path_uri: - default: /api/v1 - description: The path to append to the base URL to form a valid REST - API request. 
- env: - - name: TSS_API_PATH_URI - required: false - token_path_uri: - default: /oauth2/token - description: The path to append to the base URL to form a valid OAuth2 - Access Grant request. - env: - - name: TSS_TOKEN_PATH_URI - required: false + _terms: + description: The integer ID of the secret. + required: true + type: list + elements: int + secret_path: + description: Indicate a full path of secret including folder and secret name when the secret ID is set to 0. + required: false + type: str + version_added: 7.2.0 + fetch_secret_ids_from_folder: + description: + - Boolean flag which indicates whether secret IDs are in a folder is fetched by folder ID or not. + - V(true) then the terms are considered as a folder IDs. Otherwise (default), they are considered as secret IDs. + required: false + type: bool + version_added: 7.1.0 + fetch_attachments: + description: + - Boolean flag which indicates whether attached files are downloaded or not. + - The download only happens if O(file_download_path) has been provided. + required: false + type: bool + version_added: 7.0.0 + file_download_path: + description: Indicate the file attachment download location. + required: false + type: path + version_added: 7.0.0 + base_url: + description: The base URL of the server, for example V(https://localhost/SecretServer). + type: string + env: + - name: TSS_BASE_URL + ini: + - section: tss_lookup + key: base_url + required: true + username: + description: The username with which to request the OAuth2 Access Grant. + type: string + env: + - name: TSS_USERNAME + ini: + - section: tss_lookup + key: username + password: + description: + - The password associated with the supplied username. + - Required when O(token) is not provided. + type: string + env: + - name: TSS_PASSWORD + ini: + - section: tss_lookup + key: password + domain: + default: "" + description: + - The domain with which to request the OAuth2 Access Grant. + - Optional when O(token) is not provided. 
+ - Requires C(python-tss-sdk) version 1.0.0 or greater. + type: string + env: + - name: TSS_DOMAIN + ini: + - section: tss_lookup + key: domain + required: false + version_added: 3.6.0 + token: + description: + - Existing token for Thycotic authorizer. + - If provided, O(username) and O(password) are not needed. + - Requires C(python-tss-sdk) version 1.0.0 or greater. + type: string + env: + - name: TSS_TOKEN + ini: + - section: tss_lookup + key: token + version_added: 3.7.0 + api_path_uri: + default: /api/v1 + description: The path to append to the base URL to form a valid REST API request. + type: string + env: + - name: TSS_API_PATH_URI + required: false + token_path_uri: + default: /oauth2/token + description: The path to append to the base URL to form a valid OAuth2 Access Grant request. + type: string + env: + - name: TSS_TOKEN_PATH_URI + required: false """ RETURN = r""" _list: - description: - - The JSON responses to C(GET /secrets/{id}). - - See U(https://updates.thycotic.net/secretserver/restapiguide/TokenAuth/#operation--secrets--id--get). - type: list - elements: dict + description: + - The JSON responses to C(GET /secrets/{id}). + - See U(https://updates.thycotic.net/secretserver/restapiguide/TokenAuth/#operation--secrets--id--get). 
+ type: list + elements: dict """ EXAMPLES = r""" - hosts: localhost vars: - secret: >- - {{ - lookup( - 'community.general.tss', - 102, - base_url='https://secretserver.domain.com/SecretServer/', - username='user.name', - password='password' - ) - }} + secret: >- + {{ + lookup( + 'community.general.tss', + 102, + base_url='https://secretserver.domain.com/SecretServer/', + username='user.name', + password='password' + ) + }} tasks: - - ansible.builtin.debug: - msg: > - the password is {{ - (secret['items'] - | items2dict(key_name='slug', - value_name='itemValue'))['password'] - }} + - ansible.builtin.debug: + msg: > + the password is {{ + (secret['items'] + | items2dict(key_name='slug', + value_name='itemValue'))['password'] + }} - hosts: localhost vars: - secret: >- - {{ - lookup( - 'community.general.tss', - 102, - base_url='https://secretserver.domain.com/SecretServer/', - username='user.name', - password='password', - domain='domain' - ) - }} + secret: >- + {{ + lookup( + 'community.general.tss', + 102, + base_url='https://secretserver.domain.com/SecretServer/', + username='user.name', + password='password', + domain='domain' + ) + }} tasks: - - ansible.builtin.debug: - msg: > - the password is {{ - (secret['items'] - | items2dict(key_name='slug', - value_name='itemValue'))['password'] - }} + - ansible.builtin.debug: + msg: > + the password is {{ + (secret['items'] + | items2dict(key_name='slug', + value_name='itemValue'))['password'] + }} - hosts: localhost vars: - secret_password: >- - {{ - ((lookup( - 'community.general.tss', - 102, - base_url='https://secretserver.domain.com/SecretServer/', - token='thycotic_access_token', - ) | from_json).get('items') | items2dict(key_name='slug', value_name='itemValue'))['password'] - }} + secret_password: >- + {{ + ((lookup( + 'community.general.tss', + 102, + base_url='https://secretserver.domain.com/SecretServer/', + token='thycotic_access_token', + ) | from_json).get('items') | items2dict(key_name='slug', 
value_name='itemValue'))['password']
+ }}
tasks:
- - ansible.builtin.debug:
- msg: the password is {{ secret_password }}
+ - ansible.builtin.debug:
+ msg: the password is {{ secret_password }}
+
+# The private key is stored in a certificate file which is attached to the secret.
+# If fetch_attachments=True then the private key file will be downloaded to the specified path
+# and the file content will be displayed in the debug message.
+- hosts: localhost
+ vars:
+ secret: >-
+ {{
+ lookup(
+ 'community.general.tss',
+ 102,
+ fetch_attachments=True,
+ file_download_path='/home/certs',
+ base_url='https://secretserver.domain.com/SecretServer/',
+ token='thycotic_access_token'
+ )
+ }}
+ tasks:
+ - ansible.builtin.debug:
+ msg: >
+ the private key is {{
+ (secret['items']
+ | items2dict(key_name='slug',
+ value_name='itemValue'))['private-key']
+ }}
+
+# If fetch_secret_ids_from_folder=true then the secret IDs in a folder are fetched based on the folder ID
+- hosts: localhost
+ vars:
+ secret: >-
+ {{
+ lookup(
+ 'community.general.tss',
+ 102,
+ fetch_secret_ids_from_folder=true,
+ base_url='https://secretserver.domain.com/SecretServer/',
+ token='thycotic_access_token'
+ )
+ }}
+ tasks:
+ - ansible.builtin.debug:
+ msg: >
+ the secret IDs are {{
+ secret
+ }}
+
+# If the secret ID is 0 and secret_path has a value then the secret is fetched by secret path
+- hosts: localhost
+ vars:
+ secret: >-
+ {{
+ lookup(
+ 'community.general.tss',
+ 0,
+ secret_path='\folderName\secretName',
+ base_url='https://secretserver.domain.com/SecretServer/',
+ username='user.name',
+ password='password'
+ )
+ }}
+ tasks:
+ - ansible.builtin.debug:
+ msg: >-
+ the password is {{
+ (secret['items']
+ | items2dict(key_name='slug',
+ value_name='itemValue'))['password']
+ }}
"""

import abc
-
+import os
from ansible.errors import AnsibleError, AnsibleOptionsError
-from ansible.module_utils import six
from ansible.plugins.lookup import LookupBase
from ansible.utils.display import Display

try:
- from thycotic.secrets.server import 
SecretServer, SecretServerError + from delinea.secrets.server import SecretServer, SecretServerError, PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer, AccessTokenAuthorizer HAS_TSS_SDK = True + HAS_DELINEA_SS_SDK = True + HAS_TSS_AUTHORIZER = True except ImportError: try: - from delinea.secrets.server import SecretServer, SecretServerError + from thycotic.secrets.server import SecretServer, SecretServerError, PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer, AccessTokenAuthorizer HAS_TSS_SDK = True + HAS_DELINEA_SS_SDK = False + HAS_TSS_AUTHORIZER = True except ImportError: SecretServer = None SecretServerError = None HAS_TSS_SDK = False - -try: - from thycotic.secrets.server import PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer, AccessTokenAuthorizer - - HAS_TSS_AUTHORIZER = True -except ImportError: - try: - from delinea.secrets.server import PasswordGrantAuthorizer, DomainPasswordGrantAuthorizer, AccessTokenAuthorizer - - HAS_TSS_AUTHORIZER = True - except ImportError: + HAS_DELINEA_SS_SDK = False PasswordGrantAuthorizer = None DomainPasswordGrantAuthorizer = None AccessTokenAuthorizer = None @@ -199,8 +286,7 @@ except ImportError: display = Display() -@six.add_metaclass(abc.ABCMeta) -class TSSClient(object): +class TSSClient(object, metaclass=abc.ABCMeta): def __init__(self): self._client = None @@ -211,13 +297,49 @@ class TSSClient(object): else: return TSSClientV0(**server_parameters) - def get_secret(self, term): - display.debug("tss_lookup term: %s" % term) - + def get_secret(self, term, secret_path, fetch_file_attachments, file_download_path): + display.debug(f"tss_lookup term: {term}") secret_id = self._term_to_secret_id(term) - display.vvv(u"Secret Server lookup of Secret with ID %d" % secret_id) + if secret_id == 0 and secret_path: + fetch_secret_by_path = True + display.vvv(f"Secret Server lookup of Secret with path {secret_path}") + else: + fetch_secret_by_path = False + display.vvv(f"Secret Server lookup of Secret with ID 
{secret_id}") - return self._client.get_secret_json(secret_id) + if fetch_file_attachments: + if fetch_secret_by_path: + obj = self._client.get_secret_by_path(secret_path, fetch_file_attachments) + else: + obj = self._client.get_secret(secret_id, fetch_file_attachments) + for i in obj['items']: + if file_download_path and os.path.isdir(file_download_path): + if i['isFile']: + try: + file_content = i['itemValue'].content + with open(os.path.join(file_download_path, f"{obj['id']}_{i['slug']}"), "wb") as f: + f.write(file_content) + except ValueError: + raise AnsibleOptionsError(f"Failed to download {i['slug']}") + except AttributeError: + display.warning(f"Could not read file content for {i['slug']}") + finally: + i['itemValue'] = "*** Not Valid For Display ***" + else: + raise AnsibleOptionsError("File download path does not exist") + return obj + else: + if fetch_secret_by_path: + return self._client.get_secret_by_path(secret_path, False) + else: + return self._client.get_secret_json(secret_id) + + def get_secret_ids_by_folderid(self, term): + display.debug(f"tss_lookup term: {term}") + folder_id = self._term_to_folder_id(term) + display.vvv(f"Secret Server lookup of Secret id's with Folder ID {folder_id}") + + return self._client.get_secret_ids_by_folderid(folder_id) @staticmethod def _term_to_secret_id(term): @@ -226,6 +348,13 @@ class TSSClient(object): except ValueError: raise AnsibleOptionsError("Secret ID must be an integer") + @staticmethod + def _term_to_folder_id(term): + try: + return int(term) + except ValueError: + raise AnsibleOptionsError("Folder ID must be an integer") + class TSSClientV0(TSSClient): def __init__(self, **server_parameters): @@ -294,6 +423,20 @@ class LookupModule(LookupBase): ) try: - return [tss.get_secret(term) for term in terms] + if self.get_option("fetch_secret_ids_from_folder"): + if HAS_DELINEA_SS_SDK: + return [tss.get_secret_ids_by_folderid(term) for term in terms] + else: + raise AnsibleError("latest python-tss-sdk must be 
installed to use this plugin") + else: + return [ + tss.get_secret( + term, + self.get_option("secret_path"), + self.get_option("fetch_attachments"), + self.get_option("file_download_path"), + ) + for term in terms + ] except SecretServerError as error: - raise AnsibleError("Secret Server lookup failure: %s" % error.message) + raise AnsibleError(f"Secret Server lookup failure: {error.message}") diff --git a/tests/unit/compat/__init__.py b/plugins/module_utils/__init__.py similarity index 100% rename from tests/unit/compat/__init__.py rename to plugins/module_utils/__init__.py diff --git a/plugins/module_utils/_filelock.py b/plugins/module_utils/_filelock.py new file mode 100644 index 0000000000..f5d0e27608 --- /dev/null +++ b/plugins/module_utils/_filelock.py @@ -0,0 +1,108 @@ +# Copyright (c) 2018, Ansible Project +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause + +# NOTE: +# This has been vendored from ansible.module_utils.common.file. This code has been removed from there for ansible-core 2.16. + +from __future__ import annotations + +import os +import stat +import time +import fcntl +import sys + +from contextlib import contextmanager + + +class LockTimeout(Exception): + pass + + +class FileLock: + ''' + Currently FileLock is implemented via fcntl.flock on a lock file, however this + behaviour may change in the future. 
Avoid mixing lock types fcntl.flock, + fcntl.lockf and module_utils.common.file.FileLock as it will certainly cause + unwanted and/or unexpected behaviour + ''' + def __init__(self): + self.lockfd = None + + @contextmanager + def lock_file(self, path, tmpdir, lock_timeout=None): + ''' + Context for lock acquisition + ''' + try: + self.set_lock(path, tmpdir, lock_timeout) + yield + finally: + self.unlock() + + def set_lock(self, path, tmpdir, lock_timeout=None): + ''' + Create a lock file based on path with flock to prevent other processes + using given path. + Please note that currently file locking only works when it is executed by + the same user, for example single user scenarios + + :kw path: Path (file) to lock + :kw tmpdir: Path where to place the temporary .lock file + :kw lock_timeout: + Wait n seconds for lock acquisition, fail if timeout is reached. + 0 = Do not wait, fail if lock cannot be acquired immediately, + Default is None, wait indefinitely until lock is released. + :returns: True + ''' + lock_path = os.path.join(tmpdir, f'ansible-{os.path.basename(path)}.lock') + l_wait = 0.1 + r_exception = IOError + if sys.version_info[0] == 3: + r_exception = BlockingIOError + + self.lockfd = open(lock_path, 'w') + + if lock_timeout <= 0: + fcntl.flock(self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB) + os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD) + return True + + if lock_timeout: + e_secs = 0 + while e_secs < lock_timeout: + try: + fcntl.flock(self.lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB) + os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD) + return True + except r_exception: + time.sleep(l_wait) + e_secs += l_wait + continue + + self.lockfd.close() + raise LockTimeout(f'{lock_timeout} sec') + + fcntl.flock(self.lockfd, fcntl.LOCK_EX) + os.chmod(lock_path, stat.S_IWRITE | stat.S_IREAD) + + return True + + def unlock(self): + ''' + Make sure lock file is available for everyone and Unlock the file descriptor + locked by set_lock + + :returns: True + ''' + if not 
self.lockfd: + return True + + try: + fcntl.flock(self.lockfd, fcntl.LOCK_UN) + self.lockfd.close() + except ValueError: # file wasn't opened, let context manager fail gracefully + pass + + return True diff --git a/plugins/module_utils/_mount.py b/plugins/module_utils/_mount.py index 63de457d7d..33d191c845 100644 --- a/plugins/module_utils/_mount.py +++ b/plugins/module_utils/_mount.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is based on # Lib/posixpath.py of cpython @@ -8,9 +7,8 @@ # (See LICENSES/PSF-2.0.txt in this collection) # SPDX-License-Identifier: PSF-2.0 -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type import os diff --git a/plugins/module_utils/_stormssh.py b/plugins/module_utils/_stormssh.py new file mode 100644 index 0000000000..42a72eb674 --- /dev/null +++ b/plugins/module_utils/_stormssh.py @@ -0,0 +1,252 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is based on +# the config parser from here: https://github.com/emre/storm/blob/master/storm/parsers/ssh_config_parser.py +# Copyright (C) <2013> +# SPDX-License-Identifier: MIT + +from __future__ import annotations +import os +import re +import traceback +from operator import itemgetter + + +try: + from paramiko.config import SSHConfig +except ImportError: + SSHConfig = object + HAS_PARAMIKO = False + PARAMIKO_IMPORT_ERROR = traceback.format_exc() +else: + HAS_PARAMIKO = True + PARAMIKO_IMPORT_ERROR = None + + +class StormConfig(SSHConfig): + def parse(self, file_obj): + """ + Read an OpenSSH config from the given file object. 
+ @param file_obj: a file-like object to read the config file from + @type file_obj: file + """ + order = 1 + host = {"host": ['*'], "config": {}, } + for line in file_obj: + line = line.rstrip('\n').lstrip() + if line == '': + self._config.append({ + 'type': 'empty_line', + 'value': line, + 'host': '', + 'order': order, + }) + order += 1 + continue + + if line.startswith('#'): + self._config.append({ + 'type': 'comment', + 'value': line, + 'host': '', + 'order': order, + }) + order += 1 + continue + + if '=' in line: + # Ensure ProxyCommand gets properly split + if line.lower().strip().startswith('proxycommand'): + proxy_re = re.compile(r"^(proxycommand)\s*=*\s*(.*)", re.I) + match = proxy_re.match(line) + key, value = match.group(1).lower(), match.group(2) + else: + key, value = line.split('=', 1) + key = key.strip().lower() + else: + # find first whitespace, and split there + i = 0 + while (i < len(line)) and not line[i].isspace(): + i += 1 + if i == len(line): + raise Exception(f'Unparsable line: {line!r}') + key = line[:i].lower() + value = line[i:].lstrip() + if key == 'host': + self._config.append(host) + value = value.split() + host = { + key: value, + 'config': {}, + 'type': 'entry', + 'order': order + } + order += 1 + elif key in ['identityfile', 'localforward', 'remoteforward']: + if key in host['config']: + host['config'][key].append(value) + else: + host['config'][key] = [value] + elif key not in host['config']: + host['config'].update({key: value}) + self._config.append(host) + + +class ConfigParser(object): + """ + Config parser for ~/.ssh/config files. 
+ """ + + def __init__(self, ssh_config_file=None): + if not ssh_config_file: + ssh_config_file = self.get_default_ssh_config_file() + + self.defaults = {} + + self.ssh_config_file = ssh_config_file + + if not os.path.exists(self.ssh_config_file): + if not os.path.exists(os.path.dirname(self.ssh_config_file)): + os.makedirs(os.path.dirname(self.ssh_config_file)) + open(self.ssh_config_file, 'w+').close() + os.chmod(self.ssh_config_file, 0o600) + + self.config_data = [] + + def get_default_ssh_config_file(self): + return os.path.expanduser("~/.ssh/config") + + def load(self): + config = StormConfig() + + with open(self.ssh_config_file) as fd: + config.parse(fd) + + for entry in config.__dict__.get("_config"): + if entry.get("host") == ["*"]: + self.defaults.update(entry.get("config")) + + if entry.get("type") in ["comment", "empty_line"]: + self.config_data.append(entry) + continue + + host_item = { + 'host': entry["host"][0], + 'options': entry.get("config"), + 'type': 'entry', + 'order': entry.get("order", 0), + } + + if len(entry["host"]) > 1: + host_item.update({ + 'host': " ".join(entry["host"]), + }) + # minor bug in paramiko.SSHConfig that duplicates + # "Host *" entries. 
+ if entry.get("config") and len(entry.get("config")) > 0: + self.config_data.append(host_item) + + return self.config_data + + def add_host(self, host, options): + self.config_data.append({ + 'host': host, + 'options': options, + 'order': self.get_last_index(), + }) + + return self + + def update_host(self, host, options, use_regex=False): + for index, host_entry in enumerate(self.config_data): + if host_entry.get("host") == host or \ + (use_regex and re.match(host, host_entry.get("host"))): + + if 'deleted_fields' in options: + deleted_fields = options.pop("deleted_fields") + for deleted_field in deleted_fields: + del self.config_data[index]["options"][deleted_field] + + self.config_data[index]["options"].update(options) + + return self + + def search_host(self, search_string): + results = [] + for host_entry in self.config_data: + if host_entry.get("type") != 'entry': + continue + if host_entry.get("host") == "*": + continue + + searchable_information = host_entry.get("host") + for key, value in host_entry.get("options").items(): + if isinstance(value, list): + value = " ".join(value) + if isinstance(value, int): + value = str(value) + + searchable_information += f" {value}" + + if search_string in searchable_information: + results.append(host_entry) + + return results + + def delete_host(self, host): + found = 0 + for index, host_entry in enumerate(self.config_data): + if host_entry.get("host") == host: + del self.config_data[index] + found += 1 + + if found == 0: + raise ValueError('No host found') + return self + + def delete_all_hosts(self): + self.config_data = [] + self.write_to_ssh_config() + + return self + + def dump(self): + if len(self.config_data) < 1: + return + + file_content = "" + self.config_data = sorted(self.config_data, key=itemgetter("order")) + + for host_item in self.config_data: + if host_item.get("type") in ['comment', 'empty_line']: + file_content += f"{host_item.get('value')}\n" + continue + host_item_content = f"Host 
{host_item.get('host')}\n" + for key, value in host_item.get("options").items(): + if isinstance(value, list): + sub_content = "" + for value_ in value: + sub_content += f" {key} {value_}\n" + host_item_content += sub_content + else: + host_item_content += f" {key} {value}\n" + file_content += host_item_content + + return file_content + + def write_to_ssh_config(self): + with open(self.ssh_config_file, 'w+') as f: + data = self.dump() + if data: + f.write(data) + return self + + def get_last_index(self): + last_index = 0 + indexes = [] + for item in self.config_data: + if item.get("order"): + indexes.append(item.get("order")) + if len(indexes) > 0: + last_index = max(indexes) + + return last_index diff --git a/plugins/module_utils/alicloud_ecs.py b/plugins/module_utils/alicloud_ecs.py index 8210793c76..e752b4aa4a 100644 --- a/plugins/module_utils/alicloud_ecs.py +++ b/plugins/module_utils/alicloud_ecs.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible @@ -10,8 +9,7 @@ # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import json @@ -90,10 +88,10 @@ def connect_to_acs(acs_module, region, **params): if not conn: if region not in [acs_module_region.id for acs_module_region in acs_module.regions()]: raise AnsibleACSError( - "Region %s does not seem to be available for acs module %s." % (region, acs_module.__name__)) + f"Region {region} does not seem to be available for acs module {acs_module.__name__}.") else: raise AnsibleACSError( - "Unknown problem connecting to region %s for acs module %s." 
% (region, acs_module.__name__)) + f"Unknown problem connecting to region {region} for acs module {acs_module.__name__}.") return conn @@ -127,7 +125,7 @@ def get_assume_role(params): def get_profile(params): if not params['alicloud_access_key'] and not params['ecs_role_name'] and params['profile']: - path = params['shared_credentials_file'] if params['shared_credentials_file'] else os.getenv('HOME') + '/.aliyun/config.json' + path = params['shared_credentials_file'] if params['shared_credentials_file'] else f"{os.getenv('HOME')}/.aliyun/config.json" auth = {} with open(path, 'r') as f: for pro in json.load(f)['profiles']: diff --git a/plugins/module_utils/android_sdkmanager.py b/plugins/module_utils/android_sdkmanager.py new file mode 100644 index 0000000000..b25a1a04fc --- /dev/null +++ b/plugins/module_utils/android_sdkmanager.py @@ -0,0 +1,146 @@ + +# Copyright (c) 2024, Stanislav Shamilov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +import re + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + +__state_map = { + "present": "--install", + "absent": "--uninstall" +} + +# sdkmanager --help 2>&1 | grep -A 2 -- --channel +__channel_map = { + "stable": 0, + "beta": 1, + "dev": 2, + "canary": 3 +} + + +def __map_channel(channel_name): + if channel_name not in __channel_map: + raise ValueError(f"Unknown channel name '{channel_name}'") + return __channel_map[channel_name] + + +def sdkmanager_runner(module, **kwargs): + return CmdRunner( + module, + command='sdkmanager', + arg_formats=dict( + state=cmd_runner_fmt.as_map(__state_map), + name=cmd_runner_fmt.as_list(), + installed=cmd_runner_fmt.as_fixed("--list_installed"), + list=cmd_runner_fmt.as_fixed('--list'), + newer=cmd_runner_fmt.as_fixed("--newer"), + 
sdk_root=cmd_runner_fmt.as_opt_eq_val("--sdk_root"), + channel=cmd_runner_fmt.as_func(lambda x: [f"--channel={__map_channel(x)}"]) + ), + force_lang="C.UTF-8", # Without this, sdkmanager binary crashes + **kwargs + ) + + +class Package: + def __init__(self, name): + self.name = name + + def __hash__(self): + return hash(self.name) + + def __ne__(self, other): + if not isinstance(other, Package): + return True + return self.name != other.name + + def __eq__(self, other): + if not isinstance(other, Package): + return False + + return self.name == other.name + + +class SdkManagerException(Exception): + pass + + +class AndroidSdkManager(object): + _RE_INSTALLED_PACKAGES_HEADER = re.compile(r'^Installed packages:$') + _RE_UPDATABLE_PACKAGES_HEADER = re.compile(r'^Available Updates:$') + + # Example: ' platform-tools | 27.0.0 | Android SDK Platform-Tools 27 | platform-tools ' + _RE_INSTALLED_PACKAGE = re.compile(r'^\s*(?P\S+)\s*\|\s*[0-9][^|]*\b\s*\|\s*.+\s*\|\s*(\S+)\s*$') + + # Example: ' platform-tools | 27.0.0 | 35.0.2' + _RE_UPDATABLE_PACKAGE = re.compile(r'^\s*(?P\S+)\s*\|\s*[0-9][^|]*\b\s*\|\s*[0-9].*\b\s*$') + + _RE_UNKNOWN_PACKAGE = re.compile(r'^Warning: Failed to find package \'(?P\S+)\'\s*$') + _RE_ACCEPT_LICENSE = re.compile(r'^The following packages can not be installed since their licenses or those of ' + r'the packages they depend on were not accepted') + + def __init__(self, module): + self.runner = sdkmanager_runner(module) + + def get_installed_packages(self): + with self.runner('installed sdk_root channel') as ctx: + rc, stdout, stderr = ctx.run() + return self._parse_packages(stdout, self._RE_INSTALLED_PACKAGES_HEADER, self._RE_INSTALLED_PACKAGE) + + def get_updatable_packages(self): + with self.runner('list newer sdk_root channel') as ctx: + rc, stdout, stderr = ctx.run() + return self._parse_packages(stdout, self._RE_UPDATABLE_PACKAGES_HEADER, self._RE_UPDATABLE_PACKAGE) + + def apply_packages_changes(self, packages, accept_licenses=False): + """ 
Install or delete packages, depending on the `module.vars.state` parameter """ + if len(packages) == 0: + return 0, '', '' + + if accept_licenses: + license_prompt_answer = 'y' + else: + license_prompt_answer = 'N' + for package in packages: + with self.runner('state name sdk_root channel', data=license_prompt_answer) as ctx: + rc, stdout, stderr = ctx.run(name=package.name) + + for line in stdout.splitlines(): + if self._RE_ACCEPT_LICENSE.match(line): + raise SdkManagerException("Licenses for some packages were not accepted") + + if rc != 0: + self._try_parse_stderr(stderr) + return rc, stdout, stderr + return 0, '', '' + + def _try_parse_stderr(self, stderr): + data = stderr.splitlines() + for line in data: + unknown_package_regex = self._RE_UNKNOWN_PACKAGE.match(line) + if unknown_package_regex: + package = unknown_package_regex.group('package') + raise SdkManagerException(f"Unknown package {package}") + + @staticmethod + def _parse_packages(stdout, header_regexp, row_regexp): + data = stdout.splitlines() + + section_found = False + packages = set() + + for line in data: + if not section_found: + section_found = header_regexp.match(line) + continue + else: + p = row_regexp.match(line) + if p: + packages.add(Package(p.group('name'))) + return packages diff --git a/plugins/module_utils/btrfs.py b/plugins/module_utils/btrfs.py new file mode 100644 index 0000000000..3c9ad3b382 --- /dev/null +++ b/plugins/module_utils/btrfs.py @@ -0,0 +1,460 @@ +# Copyright (c) 2022, Gregory Furlong +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +from ansible.module_utils.common.text.converters import to_bytes +import re +import os + + +def normalize_subvolume_path(path): + """ + Normalizes btrfs subvolume paths to ensure exactly one leading slash, no trailing slashes and no consecutive slashes. 
+ In addition, if the path is prefixed with a leading , this value is removed. + """ + fstree_stripped = re.sub(r'^', '', path) + result = re.sub(r'/+$', '', re.sub(r'/+', '/', f"/{fstree_stripped}")) + return result if len(result) > 0 else '/' + + +class BtrfsModuleException(Exception): + pass + + +class BtrfsCommands(object): + + """ + Provides access to a subset of the Btrfs command line + """ + + def __init__(self, module): + self.__module = module + self.__btrfs = self.__module.get_bin_path("btrfs", required=True) + + def filesystem_show(self): + command = f"{self.__btrfs} filesystem show -d" + result = self.__module.run_command(command, check_rc=True) + stdout = [x.strip() for x in result[1].splitlines()] + filesystems = [] + current = None + for line in stdout: + if line.startswith('Label'): + current = self.__parse_filesystem(line) + filesystems.append(current) + elif line.startswith('devid'): + current['devices'].append(self.__parse_filesystem_device(line)) + return filesystems + + def __parse_filesystem(self, line): + label = re.sub(r'\s*uuid:.*$', '', re.sub(r'^Label:\s*', '', line)) + id = re.sub(r'^.*uuid:\s*', '', line) + + filesystem = {} + filesystem['label'] = label.strip("'") if label != 'none' else None + filesystem['uuid'] = id + filesystem['devices'] = [] + filesystem['mountpoints'] = [] + filesystem['subvolumes'] = [] + filesystem['default_subvolid'] = None + return filesystem + + def __parse_filesystem_device(self, line): + return re.sub(r'^.*path\s', '', line) + + def subvolumes_list(self, filesystem_path): + command = f"{self.__btrfs} subvolume list -tap {filesystem_path}" + result = self.__module.run_command(command, check_rc=True) + stdout = [x.split('\t') for x in result[1].splitlines()] + subvolumes = [{'id': 5, 'parent': None, 'path': '/'}] + if len(stdout) > 2: + subvolumes.extend([self.__parse_subvolume_list_record(x) for x in stdout[2:]]) + return subvolumes + + def __parse_subvolume_list_record(self, item): + return { + 'id': 
int(item[0]), + 'parent': int(item[2]), + 'path': normalize_subvolume_path(item[5]), + } + + def subvolume_get_default(self, filesystem_path): + command = [self.__btrfs, "subvolume", "get-default", to_bytes(filesystem_path)] + result = self.__module.run_command(command, check_rc=True) + # ID [n] ... + return int(result[1].strip().split()[1]) + + def subvolume_set_default(self, filesystem_path, subvolume_id): + command = [self.__btrfs, "subvolume", "set-default", str(subvolume_id), to_bytes(filesystem_path)] + result = self.__module.run_command(command, check_rc=True) + + def subvolume_create(self, subvolume_path): + command = [self.__btrfs, "subvolume", "create", to_bytes(subvolume_path)] + result = self.__module.run_command(command, check_rc=True) + + def subvolume_snapshot(self, snapshot_source, snapshot_destination): + command = [self.__btrfs, "subvolume", "snapshot", to_bytes(snapshot_source), to_bytes(snapshot_destination)] + result = self.__module.run_command(command, check_rc=True) + + def subvolume_delete(self, subvolume_path): + command = [self.__btrfs, "subvolume", "delete", to_bytes(subvolume_path)] + result = self.__module.run_command(command, check_rc=True) + + +class BtrfsInfoProvider(object): + + """ + Utility providing details of the currently available btrfs filesystems + """ + + def __init__(self, module): + self.__module = module + self.__btrfs_api = BtrfsCommands(module) + self.__findmnt_path = self.__module.get_bin_path("findmnt", required=True) + + def get_filesystems(self): + filesystems = self.__btrfs_api.filesystem_show() + mountpoints = self.__find_mountpoints() + for filesystem in filesystems: + device_mountpoints = self.__filter_mountpoints_for_devices(mountpoints, filesystem['devices']) + filesystem['mountpoints'] = device_mountpoints + + if len(device_mountpoints) > 0: + + # any path within the filesystem can be used to query metadata + mountpoint = device_mountpoints[0]['mountpoint'] + filesystem['subvolumes'] = 
self.get_subvolumes(mountpoint) + filesystem['default_subvolid'] = self.get_default_subvolume_id(mountpoint) + + return filesystems + + def get_mountpoints(self, filesystem_devices): + mountpoints = self.__find_mountpoints() + return self.__filter_mountpoints_for_devices(mountpoints, filesystem_devices) + + def get_subvolumes(self, filesystem_path): + return self.__btrfs_api.subvolumes_list(filesystem_path) + + def get_default_subvolume_id(self, filesystem_path): + return self.__btrfs_api.subvolume_get_default(filesystem_path) + + def __filter_mountpoints_for_devices(self, mountpoints, devices): + return [m for m in mountpoints if (m['device'] in devices)] + + def __find_mountpoints(self): + command = f"{self.__findmnt_path} -t btrfs -nvP" + result = self.__module.run_command(command) + mountpoints = [] + if result[0] == 0: + lines = result[1].splitlines() + for line in lines: + mountpoint = self.__parse_mountpoint_pairs(line) + mountpoints.append(mountpoint) + return mountpoints + + def __parse_mountpoint_pairs(self, line): + pattern = re.compile(r'^TARGET="(?P.*)"\s+SOURCE="(?P.*)"\s+FSTYPE="(?P.*)"\s+OPTIONS="(?P.*)"\s*$') + match = pattern.search(line) + if match is not None: + groups = match.groupdict() + + return { + 'mountpoint': groups['target'], + 'device': groups['source'], + 'subvolid': self.__extract_mount_subvolid(groups['options']), + } + else: + raise BtrfsModuleException(f"Failed to parse findmnt result for line: '{line}'") + + def __extract_mount_subvolid(self, mount_options): + for option in mount_options.split(','): + if option.startswith('subvolid='): + return int(option[len('subvolid='):]) + raise BtrfsModuleException(f"Failed to find subvolid for mountpoint in options '{mount_options}'") + + +class BtrfsSubvolume(object): + + """ + Wrapper class providing convenience methods for inspection of a btrfs subvolume + """ + + def __init__(self, filesystem, subvolume_id): + self.__filesystem = filesystem + self.__subvolume_id = subvolume_id + + def 
get_filesystem(self): + return self.__filesystem + + def is_mounted(self): + mountpoints = self.get_mountpoints() + return mountpoints is not None and len(mountpoints) > 0 + + def is_filesystem_root(self): + return 5 == self.__subvolume_id + + def is_filesystem_default(self): + return self.__filesystem.default_subvolid == self.__subvolume_id + + def get_mounted_path(self): + mountpoints = self.get_mountpoints() + if mountpoints is not None and len(mountpoints) > 0: + return mountpoints[0] + elif self.parent is not None: + parent = self.__filesystem.get_subvolume_by_id(self.parent) + parent_path = parent.get_mounted_path() + if parent_path is not None: + return parent_path + os.path.sep + self.name + else: + return None + + def get_mountpoints(self): + return self.__filesystem.get_mountpoints_by_subvolume_id(self.__subvolume_id) + + def get_child_relative_path(self, absolute_child_path): + """ + Get the relative path from this subvolume to the named child subvolume. + The provided parameter is expected to be normalized as by normalize_subvolume_path. 
+ """ + path = self.path + if absolute_child_path.startswith(path): + relative = absolute_child_path[len(path):] + return re.sub(r'^/*', '', relative) + else: + raise BtrfsModuleException(f"Path '{absolute_child_path}' doesn't start with '{path}'") + + def get_parent_subvolume(self): + parent_id = self.parent + return self.__filesystem.get_subvolume_by_id(parent_id) if parent_id is not None else None + + def get_child_subvolumes(self): + return self.__filesystem.get_subvolume_children(self.__subvolume_id) + + @property + def __info(self): + return self.__filesystem.get_subvolume_info_for_id(self.__subvolume_id) + + @property + def id(self): + return self.__subvolume_id + + @property + def name(self): + return self.path.split('/').pop() + + @property + def path(self): + return self.__info['path'] + + @property + def parent(self): + return self.__info['parent'] + + +class BtrfsFilesystem(object): + + """ + Wrapper class providing convenience methods for inspection of a btrfs filesystem + """ + + def __init__(self, info, provider, module): + self.__provider = provider + + # constant for module execution + self.__uuid = info['uuid'] + self.__label = info['label'] + self.__devices = info['devices'] + + # refreshable + self.__default_subvolid = info['default_subvolid'] if 'default_subvolid' in info else None + self.__update_mountpoints(info['mountpoints'] if 'mountpoints' in info else []) + self.__update_subvolumes(info['subvolumes'] if 'subvolumes' in info else []) + + @property + def uuid(self): + return self.__uuid + + @property + def label(self): + return self.__label + + @property + def default_subvolid(self): + return self.__default_subvolid + + @property + def devices(self): + return list(self.__devices) + + def refresh(self): + self.refresh_mountpoints() + self.refresh_subvolumes() + self.refresh_default_subvolume() + + def refresh_mountpoints(self): + mountpoints = self.__provider.get_mountpoints(list(self.__devices)) + self.__update_mountpoints(mountpoints) + + 
def __update_mountpoints(self, mountpoints): + self.__mountpoints = dict() + for i in mountpoints: + subvolid = i['subvolid'] + mountpoint = i['mountpoint'] + if subvolid not in self.__mountpoints: + self.__mountpoints[subvolid] = [] + self.__mountpoints[subvolid].append(mountpoint) + + def refresh_subvolumes(self): + filesystem_path = self.get_any_mountpoint() + if filesystem_path is not None: + subvolumes = self.__provider.get_subvolumes(filesystem_path) + self.__update_subvolumes(subvolumes) + + def __update_subvolumes(self, subvolumes): + # TODO strategy for retaining information on deleted subvolumes? + self.__subvolumes = dict() + for subvolume in subvolumes: + self.__subvolumes[subvolume['id']] = subvolume + + def refresh_default_subvolume(self): + filesystem_path = self.get_any_mountpoint() + if filesystem_path is not None: + self.__default_subvolid = self.__provider.get_default_subvolume_id(filesystem_path) + + def contains_device(self, device): + return device in self.__devices + + def contains_subvolume(self, subvolume): + return self.get_subvolume_by_name(subvolume) is not None + + def get_subvolume_by_id(self, subvolume_id): + return BtrfsSubvolume(self, subvolume_id) if subvolume_id in self.__subvolumes else None + + def get_subvolume_info_for_id(self, subvolume_id): + return self.__subvolumes[subvolume_id] if subvolume_id in self.__subvolumes else None + + def get_subvolume_by_name(self, subvolume): + for subvolume_info in self.__subvolumes.values(): + if subvolume_info['path'] == subvolume: + return BtrfsSubvolume(self, subvolume_info['id']) + return None + + def get_any_mountpoint(self): + for subvol_mountpoints in self.__mountpoints.values(): + if len(subvol_mountpoints) > 0: + return subvol_mountpoints[0] + # maybe error? 
+ return None + + def get_any_mounted_subvolume(self): + for subvolid, subvol_mountpoints in self.__mountpoints.items(): + if len(subvol_mountpoints) > 0: + return self.get_subvolume_by_id(subvolid) + return None + + def get_mountpoints_by_subvolume_id(self, subvolume_id): + return self.__mountpoints[subvolume_id] if subvolume_id in self.__mountpoints else [] + + def get_nearest_subvolume(self, subvolume): + """Return the identified subvolume if existing, else the closest matching parent""" + subvolumes_by_path = self.__get_subvolumes_by_path() + while len(subvolume) > 1: + if subvolume in subvolumes_by_path: + return BtrfsSubvolume(self, subvolumes_by_path[subvolume]['id']) + else: + subvolume = re.sub(r'/[^/]+$', '', subvolume) + + return BtrfsSubvolume(self, 5) + + def get_mountpath_as_child(self, subvolume_name): + """Find a path to the target subvolume through a mounted ancestor""" + nearest = self.get_nearest_subvolume(subvolume_name) + if nearest.path == subvolume_name: + nearest = nearest.get_parent_subvolume() + if nearest is None or nearest.get_mounted_path() is None: + raise BtrfsModuleException(f"Failed to find a path '{subvolume_name}' through a mounted parent subvolume") + else: + return nearest.get_mounted_path() + os.path.sep + nearest.get_child_relative_path(subvolume_name) + + def get_subvolume_children(self, subvolume_id): + return [BtrfsSubvolume(self, x['id']) for x in self.__subvolumes.values() if x['parent'] == subvolume_id] + + def __get_subvolumes_by_path(self): + result = {} + for s in self.__subvolumes.values(): + path = s['path'] + result[path] = s + return result + + def is_mounted(self): + return self.__mountpoints is not None and len(self.__mountpoints) > 0 + + def get_summary(self): + subvolumes = [] + sources = self.__subvolumes.values() if self.__subvolumes is not None else [] + for subvolume in sources: + id = subvolume['id'] + subvolumes.append({ + 'id': id, + 'path': subvolume['path'], + 'parent': subvolume['parent'], + 
'mountpoints': self.get_mountpoints_by_subvolume_id(id), + }) + + return { + 'default_subvolume': self.__default_subvolid, + 'devices': self.__devices, + 'label': self.__label, + 'uuid': self.__uuid, + 'subvolumes': subvolumes, + } + + +class BtrfsFilesystemsProvider(object): + + """ + Provides methods to query available btrfs filesystems + """ + + def __init__(self, module): + self.__module = module + self.__provider = BtrfsInfoProvider(module) + self.__filesystems = None + + def get_matching_filesystem(self, criteria): + if criteria['device'] is not None: + criteria['device'] = os.path.realpath(criteria['device']) + + self.__check_init() + matching = [f for f in self.__filesystems.values() if self.__filesystem_matches_criteria(f, criteria)] + if len(matching) == 1: + return matching[0] + else: + raise BtrfsModuleException( + f"Found {len(matching)} filesystems matching criteria uuid={criteria['uuid']} label={criteria['label']} device={criteria['device']}" + ) + + def __filesystem_matches_criteria(self, filesystem, criteria): + return ((criteria['uuid'] is None or filesystem.uuid == criteria['uuid']) and + (criteria['label'] is None or filesystem.label == criteria['label']) and + (criteria['device'] is None or filesystem.contains_device(criteria['device']))) + + def get_filesystem_for_device(self, device): + real_device = os.path.realpath(device) + self.__check_init() + for fs in self.__filesystems.values(): + if fs.contains_device(real_device): + return fs + return None + + def get_filesystems(self): + self.__check_init() + return list(self.__filesystems.values()) + + def __check_init(self): + if self.__filesystems is None: + self.__filesystems = dict() + for f in self.__provider.get_filesystems(): + uuid = f['uuid'] + self.__filesystems[uuid] = BtrfsFilesystem(f, self.__provider, self.__module) diff --git a/plugins/module_utils/cloud.py b/plugins/module_utils/cloud.py index 092fe16ada..c8043a8d9e 100644 --- a/plugins/module_utils/cloud.py +++ 
b/plugins/module_utils/cloud.py @@ -1,11 +1,9 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2016 Allen Sanabria, # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations """ @@ -134,7 +132,7 @@ class CloudRetry(object): if isinstance(e, cls.base_class): # pylint: disable=isinstance-second-argument-not-valid-type response_code = cls.status_code_from_exception(e) if cls.found(response_code, catch_extra_error_codes): - msg = "{0}: Retrying in {1} seconds...".format(str(e), delay) + msg = f"{e}: Retrying in {delay} seconds..." syslog.syslog(syslog.LOG_INFO, msg) time.sleep(delay) else: diff --git a/plugins/module_utils/cmd_runner.py b/plugins/module_utils/cmd_runner.py index 21d61a6a5c..b4903e1452 100644 --- a/plugins/module_utils/cmd_runner.py +++ b/plugins/module_utils/cmd_runner.py @@ -1,15 +1,14 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2022, Alexei Znamensky # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -from functools import wraps +import os from ansible.module_utils.common.collections import is_sequence -from ansible.module_utils.six import iteritems +from ansible.module_utils.common.locale import get_best_parsable_locale +from ansible_collections.community.general.plugins.module_utils import cmd_runner_fmt def _ensure_list(value): @@ -31,18 +30,10 @@ class MissingArgumentFormat(CmdRunnerException): self.args_formats = args_formats def __repr__(self): - return "MissingArgumentFormat({0!r}, {1!r}, {2!r})".format( - self.arg, - self.args_order, - self.args_formats, - ) + return 
f"MissingArgumentFormat({self.arg!r}, {self.args_order!r}, {self.args_formats!r})" def __str__(self): - return "Cannot find format for parameter {0} {1} in: {2}".format( - self.arg, - self.args_order, - self.args_formats, - ) + return f"Cannot find format for parameter {self.arg} {self.args_order} in: {self.args_formats}" class MissingArgumentValue(CmdRunnerException): @@ -51,16 +42,10 @@ class MissingArgumentValue(CmdRunnerException): self.arg = arg def __repr__(self): - return "MissingArgumentValue({0!r}, {1!r})".format( - self.args_order, - self.arg, - ) + return f"MissingArgumentValue({self.args_order!r}, {self.arg!r})" def __str__(self): - return "Cannot find value for parameter {0} in {1}".format( - self.arg, - self.args_order, - ) + return f"Cannot find value for parameter {self.arg} in {self.args_order}" class FormatError(CmdRunnerException): @@ -72,104 +57,10 @@ class FormatError(CmdRunnerException): super(FormatError, self).__init__() def __repr__(self): - return "FormatError({0!r}, {1!r}, {2!r}, {3!r})".format( - self.name, - self.value, - self.args_formats, - self.exc, - ) + return f"FormatError({self.name!r}, {self.value!r}, {self.args_formats!r}, {self.exc!r})" def __str__(self): - return "Failed to format parameter {0} with value {1}: {2}".format( - self.name, - self.value, - self.exc, - ) - - -class _ArgFormat(object): - def __init__(self, func, ignore_none=None, ignore_missing_value=False): - self.func = func - self.ignore_none = ignore_none - self.ignore_missing_value = ignore_missing_value - - def __call__(self, value, ctx_ignore_none): - ignore_none = self.ignore_none if self.ignore_none is not None else ctx_ignore_none - if value is None and ignore_none: - return [] - f = self.func - return [str(x) for x in f(value)] - - -class _Format(object): - @staticmethod - def as_bool(args_true, args_false=None, ignore_none=None): - if args_false is not None: - if ignore_none is None: - ignore_none = False - else: - args_false = [] - return 
_ArgFormat(lambda value: _ensure_list(args_true) if value else _ensure_list(args_false), ignore_none=ignore_none) - - @staticmethod - def as_bool_not(args): - return _ArgFormat(lambda value: [] if value else _ensure_list(args), ignore_none=False) - - @staticmethod - def as_optval(arg, ignore_none=None): - return _ArgFormat(lambda value: ["{0}{1}".format(arg, value)], ignore_none=ignore_none) - - @staticmethod - def as_opt_val(arg, ignore_none=None): - return _ArgFormat(lambda value: [arg, value], ignore_none=ignore_none) - - @staticmethod - def as_opt_eq_val(arg, ignore_none=None): - return _ArgFormat(lambda value: ["{0}={1}".format(arg, value)], ignore_none=ignore_none) - - @staticmethod - def as_list(ignore_none=None): - return _ArgFormat(_ensure_list, ignore_none=ignore_none) - - @staticmethod - def as_fixed(args): - return _ArgFormat(lambda value: _ensure_list(args), ignore_none=False, ignore_missing_value=True) - - @staticmethod - def as_func(func, ignore_none=None): - return _ArgFormat(func, ignore_none=ignore_none) - - @staticmethod - def as_map(_map, default=None, ignore_none=None): - if default is None: - default = [] - return _ArgFormat(lambda value: _ensure_list(_map.get(value, default)), ignore_none=ignore_none) - - @staticmethod - def as_default_type(_type, arg="", ignore_none=None): - fmt = _Format - if _type == "dict": - return fmt.as_func(lambda d: ["--{0}={1}".format(*a) for a in iteritems(d)], ignore_none=ignore_none) - if _type == "list": - return fmt.as_func(lambda value: ["--{0}".format(x) for x in value], ignore_none=ignore_none) - if _type == "bool": - return fmt.as_bool("--{0}".format(arg)) - - return fmt.as_opt_val("--{0}".format(arg), ignore_none=ignore_none) - - @staticmethod - def unpack_args(func): - @wraps(func) - def wrapper(v): - return func(*v) - return wrapper - - @staticmethod - def unpack_kwargs(func): - @wraps(func) - def wrapper(v): - return func(**v) - return wrapper + return f"Failed to format parameter {self.name} with value 
{self.value}: {self.exc}" class CmdRunner(object): @@ -191,21 +82,32 @@ class CmdRunner(object): self.default_args_order = self._prepare_args_order(default_args_order) if arg_formats is None: arg_formats = {} - self.arg_formats = dict(arg_formats) + self.arg_formats = {} + for fmt_name, fmt in arg_formats.items(): + if not cmd_runner_fmt.is_argformat(fmt): + fmt = cmd_runner_fmt.as_func(func=fmt, ignore_none=True) + self.arg_formats[fmt_name] = fmt self.check_rc = check_rc - self.force_lang = force_lang + if force_lang == "auto": + try: + self.force_lang = get_best_parsable_locale(module) + except RuntimeWarning: + self.force_lang = "C" + else: + self.force_lang = force_lang self.path_prefix = path_prefix if environ_update is None: environ_update = {} self.environ_update = environ_update - self.command[0] = module.get_bin_path(self.command[0], opt_dirs=path_prefix, required=True) + _cmd = self.command[0] + self.command[0] = _cmd if (os.path.isabs(_cmd) or '/' in _cmd) else module.get_bin_path(_cmd, opt_dirs=path_prefix, required=True) - for mod_param_name, spec in iteritems(module.argument_spec): - if mod_param_name not in self.arg_formats: - self.arg_formats[mod_param_name] = _Format.as_default_type(spec['type'], mod_param_name) + @property + def binary(self): + return self.command[0] - def __call__(self, args_order=None, output_process=None, ignore_value_none=True, check_mode_skip=False, check_mode_return=None, **kwargs): + def __call__(self, args_order=None, output_process=None, check_mode_skip=False, check_mode_return=None, **kwargs): if output_process is None: output_process = _process_as_is if args_order is None: @@ -217,7 +119,6 @@ class CmdRunner(object): return _CmdRunnerContext(runner=self, args_order=args_order, output_process=output_process, - ignore_value_none=ignore_value_none, check_mode_skip=check_mode_skip, check_mode_return=check_mode_return, **kwargs) @@ -229,11 +130,10 @@ class CmdRunner(object): class _CmdRunnerContext(object): - def 
__init__(self, runner, args_order, output_process, ignore_value_none, check_mode_skip, check_mode_return, **kwargs): + def __init__(self, runner, args_order, output_process, check_mode_skip, check_mode_return, **kwargs): self.runner = runner self.args_order = tuple(args_order) self.output_process = output_process - self.ignore_value_none = ignore_value_none self.check_mode_skip = check_mode_skip self.check_mode_return = check_mode_return self.run_command_args = dict(kwargs) @@ -272,7 +172,7 @@ class _CmdRunnerContext(object): value = named_args[arg_name] elif not runner.arg_formats[arg_name].ignore_missing_value: raise MissingArgumentValue(self.args_order, arg_name) - self.cmd.extend(runner.arg_formats[arg_name](value, ctx_ignore_none=self.ignore_value_none)) + self.cmd.extend(runner.arg_formats[arg_name](value)) except MissingArgumentValue: raise except Exception as e: @@ -288,7 +188,6 @@ class _CmdRunnerContext(object): @property def run_info(self): return dict( - ignore_value_none=self.ignore_value_none, check_rc=self.check_rc, environ_update=self.environ_update, args_order=self.args_order, @@ -306,14 +205,3 @@ class _CmdRunnerContext(object): def __exit__(self, exc_type, exc_val, exc_tb): return False - - -cmd_runner_fmt = _Format() - -# -# The fmt form is deprecated and will be removed in community.general 7.0.0 -# Please use: -# cmd_runner_fmt -# Or, to retain the same effect, use: -# from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt as fmt -fmt = cmd_runner_fmt diff --git a/plugins/module_utils/cmd_runner_fmt.py b/plugins/module_utils/cmd_runner_fmt.py new file mode 100644 index 0000000000..dcb9fc8e20 --- /dev/null +++ b/plugins/module_utils/cmd_runner_fmt.py @@ -0,0 +1,116 @@ +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations 
+
+from functools import wraps
+
+from ansible.module_utils.common.collections import is_sequence
+
+
+def _ensure_list(value):
+    return list(value) if is_sequence(value) else [value]
+
+
+class _ArgFormat(object):
+    def __init__(self, func, ignore_none=True, ignore_missing_value=False):
+        self.func = func
+        self.ignore_none = ignore_none
+        self.ignore_missing_value = ignore_missing_value
+
+    def __call__(self, value):
+        ignore_none = self.ignore_none if self.ignore_none is not None else True
+        if value is None and ignore_none:
+            return []
+        f = self.func
+        return [str(x) for x in f(value)]
+
+    def __str__(self):
+        return f"<ArgFormat (func={self.func}, ignore_none={self.ignore_none}, ignore_missing_value={self.ignore_missing_value})>"
+
+    def __repr__(self):
+        return str(self)
+
+
+def as_bool(args_true, args_false=None, ignore_none=None):
+    if args_false is not None:
+        if ignore_none is None:
+            ignore_none = False
+    else:
+        args_false = []
+    return _ArgFormat(lambda value: _ensure_list(args_true) if value else _ensure_list(args_false), ignore_none=ignore_none)
+
+
+def as_bool_not(args):
+    return as_bool([], args, ignore_none=False)
+
+
+def as_optval(arg, ignore_none=None):
+    return _ArgFormat(lambda value: [f"{arg}{value}"], ignore_none=ignore_none)
+
+
+def as_opt_val(arg, ignore_none=None):
+    return _ArgFormat(lambda value: [arg, value], ignore_none=ignore_none)
+
+
+def as_opt_eq_val(arg, ignore_none=None):
+    return _ArgFormat(lambda value: [f"{arg}={value}"], ignore_none=ignore_none)
+
+
+def as_list(ignore_none=None, min_len=0, max_len=None):
+    def func(value):
+        value = _ensure_list(value)
+        if len(value) < min_len:
+            raise ValueError(f"Parameter must have at least {min_len} element(s)")
+        if max_len is not None and len(value) > max_len:
+            raise ValueError(f"Parameter must have at most {max_len} element(s)")
+        return value
+    return _ArgFormat(func, ignore_none=ignore_none)
+
+
+def as_fixed(*args):
+    if len(args) == 1 and is_sequence(args[0]):
+        args = args[0]
+    return _ArgFormat(lambda value: _ensure_list(args), ignore_none=False, ignore_missing_value=True)
+
+
+def as_func(func, ignore_none=None): + return _ArgFormat(func, ignore_none=ignore_none) + + +def as_map(_map, default=None, ignore_none=None): + if default is None: + default = [] + return _ArgFormat(lambda value: _ensure_list(_map.get(value, default)), ignore_none=ignore_none) + + +def unpack_args(func): + @wraps(func) + def wrapper(v): + return func(*v) + return wrapper + + +def unpack_kwargs(func): + @wraps(func) + def wrapper(v): + return func(**v) + return wrapper + + +def stack(fmt): + @wraps(fmt) + def wrapper(*args, **kwargs): + new_func = fmt(ignore_none=True, *args, **kwargs) + + def stacking(value): + stack = [new_func(v) for v in value if v] + stack = [x for args in stack for x in args] + return stack + return _ArgFormat(stacking, ignore_none=True) + return wrapper + + +def is_argformat(fmt): + return isinstance(fmt, _ArgFormat) diff --git a/plugins/module_utils/consul.py b/plugins/module_utils/consul.py new file mode 100644 index 0000000000..b814485c55 --- /dev/null +++ b/plugins/module_utils/consul.py @@ -0,0 +1,349 @@ + +# Copyright (c) 2022, Håkon Lerring +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +import copy +import json +import re +from urllib import error as urllib_error +from urllib.parse import urlencode + +from ansible.module_utils.urls import open_url + + +def get_consul_url(configuration): + return f"{configuration.scheme}://{configuration.host}:{configuration.port}/v1" + + +def get_auth_headers(configuration): + if configuration.token is None: + return {} + else: + return {"X-Consul-Token": configuration.token} + + +class RequestError(Exception): + def __init__(self, status, response_data=None): + self.status = status + self.response_data = response_data + + def __str__(self): + if self.response_data is None: + # self.status is already the message (backwards compat) + return self.status + return f"HTTP 
{self.status}: {self.response_data}" + + +def handle_consul_response_error(response): + if 400 <= response.status_code < 600: + raise RequestError(f"{response.status_code} {response.content}") + + +AUTH_ARGUMENTS_SPEC = dict( + host=dict(default="localhost"), + port=dict(type="int", default=8500), + scheme=dict(default="http"), + validate_certs=dict(type="bool", default=True), + token=dict(no_log=True), + ca_path=dict(), +) + + +def camel_case_key(key): + parts = [] + for part in key.split("_"): + if part in {"id", "ttl", "jwks", "jwt", "oidc", "iam", "sts"}: + parts.append(part.upper()) + else: + parts.append(part.capitalize()) + return "".join(parts) + + +def validate_check(check): + validate_duration_keys = ['Interval', 'Ttl', 'Timeout'] + validate_tcp_regex = r"(?P.*):(?P(?:[0-9]+))$" + if check.get('Tcp') is not None: + match = re.match(validate_tcp_regex, check['Tcp']) + if not match: + raise Exception('tcp check must be in host:port format') + for duration in validate_duration_keys: + if duration in check and check[duration] is not None: + check[duration] = validate_duration(check[duration]) + + +def validate_duration(duration): + if duration: + if not re.search(r"\d+(?:ns|us|ms|s|m|h)", duration): + duration = f"{duration}s" + return duration + + +STATE_PARAMETER = "state" +STATE_PRESENT = "present" +STATE_ABSENT = "absent" + +OPERATION_READ = "read" +OPERATION_CREATE = "create" +OPERATION_UPDATE = "update" +OPERATION_DELETE = "remove" + + +def _normalize_params(params, arg_spec): + final_params = {} + for k, v in params.items(): + if k not in arg_spec or v is None: # Alias + continue + spec = arg_spec[k] + if ( + spec.get("type") == "list" + and spec.get("elements") == "dict" + and spec.get("options") + and v + ): + v = [_normalize_params(d, spec["options"]) for d in v] + elif spec.get("type") == "dict" and spec.get("options") and v: + v = _normalize_params(v, spec["options"]) + final_params[k] = v + return final_params + + +class _ConsulModule: + """Base 
class for Consul modules. + + This class is considered private, till the API is fully fleshed out. + As such backwards incompatible changes can occur even in bugfix releases. + """ + + api_endpoint = None # type: str + unique_identifiers = None # type: list + result_key = None # type: str + create_only_fields = set() + operational_attributes = set() + params = {} + + def __init__(self, module): + self._module = module + self.params = _normalize_params(module.params, module.argument_spec) + self.api_params = { + k: camel_case_key(k) + for k in self.params + if k not in STATE_PARAMETER and k not in AUTH_ARGUMENTS_SPEC + } + + self.operational_attributes.update({"CreateIndex", "CreateTime", "Hash", "ModifyIndex"}) + + def execute(self): + obj = self.read_object() + + changed = False + diff = {} + if self.params[STATE_PARAMETER] == STATE_PRESENT: + obj_from_module = self.module_to_obj(obj is not None) + if obj is None: + operation = OPERATION_CREATE + new_obj = self.create_object(obj_from_module) + diff = {"before": {}, "after": new_obj} + changed = True + else: + operation = OPERATION_UPDATE + if self._needs_update(obj, obj_from_module): + new_obj = self.update_object(obj, obj_from_module) + diff = {"before": obj, "after": new_obj} + changed = True + else: + new_obj = obj + elif self.params[STATE_PARAMETER] == STATE_ABSENT: + operation = OPERATION_DELETE + if obj is not None: + self.delete_object(obj) + changed = True + diff = {"before": obj, "after": {}} + else: + diff = {"before": {}, "after": {}} + new_obj = None + else: + raise RuntimeError("Unknown state supplied.") + + result = {"changed": changed} + if changed: + result["operation"] = operation + if self._module._diff: + result["diff"] = diff + if self.result_key: + result[self.result_key] = new_obj + self._module.exit_json(**result) + + def module_to_obj(self, is_update): + obj = {} + for k, v in self.params.items(): + result = self.map_param(k, v, is_update) + if result: + obj[result[0]] = result[1] + return 
obj + + def map_param(self, k, v, is_update): + def helper(item): + return {camel_case_key(k): v for k, v in item.items()} + + def needs_camel_case(k): + spec = self._module.argument_spec[k] + return ( + spec.get("type") == "list" + and spec.get("elements") == "dict" + and spec.get("options") + ) or (spec.get("type") == "dict" and spec.get("options")) + + if k in self.api_params and v is not None: + if isinstance(v, dict) and needs_camel_case(k): + v = helper(v) + elif isinstance(v, (list, tuple)) and needs_camel_case(k): + v = [helper(i) for i in v] + if is_update and k in self.create_only_fields: + return + return camel_case_key(k), v + + def _needs_update(self, api_obj, module_obj): + api_obj = copy.deepcopy(api_obj) + module_obj = copy.deepcopy(module_obj) + return self.needs_update(api_obj, module_obj) + + def needs_update(self, api_obj, module_obj): + for k, v in module_obj.items(): + if k not in api_obj: + return True + if api_obj[k] != v: + return True + return False + + def prepare_object(self, existing, obj): + existing = { + k: v for k, v in existing.items() if k not in self.operational_attributes + } + for k, v in obj.items(): + existing[k] = v + return existing + + def id_from_obj(self, obj, camel_case=False): + def key_func(key): + return camel_case_key(key) if camel_case else key + + if self.unique_identifiers: + for identifier in self.unique_identifiers: + identifier = key_func(identifier) + if identifier in obj: + return obj[identifier] + return None + + def endpoint_url(self, operation, identifier=None): + if operation == OPERATION_CREATE: + return self.api_endpoint + elif identifier: + return f"{self.api_endpoint}/{identifier}" + raise RuntimeError("invalid arguments passed") + + def read_object(self): + identifier = self.id_from_obj(self.params) + url = self.endpoint_url(OPERATION_READ, identifier) + try: + return self.get(url) + except RequestError as e: + if e.status == 404: + return + elif e.status == 403 and b"ACL not found" in 
e.response_data: + return + raise + + def create_object(self, obj): + if self._module.check_mode: + return obj + else: + url = self.endpoint_url(OPERATION_CREATE) + created_obj = self.put(url, data=self.prepare_object({}, obj)) + if created_obj is None: + created_obj = self.read_object() + return created_obj + + def update_object(self, existing, obj): + merged_object = self.prepare_object(existing, obj) + if self._module.check_mode: + return merged_object + else: + url = self.endpoint_url(OPERATION_UPDATE, self.id_from_obj(existing, camel_case=True)) + updated_obj = self.put(url, data=merged_object) + if updated_obj is None: + updated_obj = self.read_object() + return updated_obj + + def delete_object(self, obj): + if self._module.check_mode: + return {} + else: + url = self.endpoint_url(OPERATION_DELETE, self.id_from_obj(obj, camel_case=True)) + return self.delete(url) + + def _request(self, method, url_parts, data=None, params=None): + module_params = self.params + + if not isinstance(url_parts, (tuple, list)): + url_parts = [url_parts] + if params: + # Remove values that are None + params = {k: v for k, v in params.items() if v is not None} + + ca_path = module_params.get("ca_path") + base_url = f"{module_params['scheme']}://{module_params['host']}:{module_params['port']}/v1" + url = "/".join([base_url] + list(url_parts)) + + headers = {} + token = self.params.get("token") + if token: + headers["X-Consul-Token"] = token + + try: + if data is not None: + data = json.dumps(data) + headers["Content-Type"] = "application/json" + if params: + url = f"{url}?{urlencode(params)}" + response = open_url( + url, + method=method, + data=data, + headers=headers, + validate_certs=module_params["validate_certs"], + ca_path=ca_path, + ) + response_data = response.read() + status = ( + response.status if hasattr(response, "status") else response.getcode() + ) + + except urllib_error.URLError as e: + if isinstance(e, urllib_error.HTTPError): + status = e.code + response_data = 
e.fp.read() + else: + self._module.fail_json( + msg=f"Could not connect to consul agent at {module_params['host']}:{module_params['port']}, error was {e}" + ) + raise + + if 400 <= status < 600: + raise RequestError(status, response_data) + + if response_data: + return json.loads(response_data) + return None + + def get(self, url_parts, **kwargs): + return self._request("GET", url_parts, **kwargs) + + def put(self, url_parts, **kwargs): + return self._request("PUT", url_parts, **kwargs) + + def delete(self, url_parts, **kwargs): + return self._request("DELETE", url_parts, **kwargs) diff --git a/plugins/module_utils/csv.py b/plugins/module_utils/csv.py index 50d2cb3868..3003875c09 100644 --- a/plugins/module_utils/csv.py +++ b/plugins/module_utils/csv.py @@ -1,18 +1,15 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2021, Andrew Pantuso (@ajpantuso) # Copyright (c) 2018, Dag Wieers (@dagwieers) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import csv -from io import BytesIO, StringIO +from io import StringIO from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.six import PY3 class CustomDialectFailureError(Exception): @@ -40,28 +37,27 @@ def initialize_dialect(dialect, **kwargs): csv.register_dialect("unix", unix_dialect) if dialect not in csv.list_dialects(): - raise DialectNotAvailableError("Dialect '%s' is not supported by your version of python." 
% dialect) + raise DialectNotAvailableError(f"Dialect '{dialect}' is not supported by your version of python.") # Create a dictionary from only set options - dialect_params = dict((k, v) for k, v in kwargs.items() if v is not None) + dialect_params = {k: v for k, v in kwargs.items() if v is not None} if dialect_params: try: csv.register_dialect('custom', dialect, **dialect_params) except TypeError as e: - raise CustomDialectFailureError("Unable to create custom dialect: %s" % to_native(e)) + raise CustomDialectFailureError(f"Unable to create custom dialect: {e}") dialect = 'custom' return dialect def read_csv(data, dialect, fieldnames=None): - + BOM = to_native('\ufeff') data = to_native(data, errors='surrogate_or_strict') + if data.startswith(BOM): + data = data[len(BOM):] - if PY3: - fake_fh = StringIO(data) - else: - fake_fh = BytesIO(data) + fake_fh = StringIO(data) reader = csv.DictReader(fake_fh, fieldnames=fieldnames, dialect=dialect) diff --git a/plugins/module_utils/database.py b/plugins/module_utils/database.py index db1dc9c23d..bb4c0efcee 100644 --- a/plugins/module_utils/database.py +++ b/plugins/module_utils/database.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
# Modules you write using this snippet, which is embedded dynamically by Ansible @@ -10,8 +9,7 @@ # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re @@ -104,19 +102,19 @@ def _identifier_parse(identifier, quote_char): dot = identifier.index('.') except ValueError: identifier = identifier.replace(quote_char, quote_char * 2) - identifier = ''.join((quote_char, identifier, quote_char)) + identifier = f"{quote_char}{identifier}{quote_char}" further_identifiers = [identifier] else: if dot == 0 or dot >= len(identifier) - 1: identifier = identifier.replace(quote_char, quote_char * 2) - identifier = ''.join((quote_char, identifier, quote_char)) + identifier = f"{quote_char}{identifier}{quote_char}" further_identifiers = [identifier] else: first_identifier = identifier[:dot] next_identifier = identifier[dot + 1:] further_identifiers = _identifier_parse(next_identifier, quote_char) first_identifier = first_identifier.replace(quote_char, quote_char * 2) - first_identifier = ''.join((quote_char, first_identifier, quote_char)) + first_identifier = f"{quote_char}{first_identifier}{quote_char}" further_identifiers.insert(0, first_identifier) return further_identifiers @@ -125,14 +123,14 @@ def _identifier_parse(identifier, quote_char): def pg_quote_identifier(identifier, id_type): identifier_fragments = _identifier_parse(identifier, quote_char='"') if len(identifier_fragments) > _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]: - raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, _PG_IDENTIFIER_TO_DOT_LEVEL[id_type])) + raise SQLParseError(f'PostgreSQL does not support {id_type} with more than {_PG_IDENTIFIER_TO_DOT_LEVEL[id_type]} dots') return '.'.join(identifier_fragments) def mysql_quote_identifier(identifier, id_type): 
identifier_fragments = _identifier_parse(identifier, quote_char='`') if (len(identifier_fragments) - 1) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]: - raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type])) + raise SQLParseError(f'MySQL does not support {id_type} with more than {_MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]} dots') special_cased_fragments = [] for fragment in identifier_fragments: @@ -187,5 +185,4 @@ def check_input(module, *args): dangerous_elements.append(elem) if dangerous_elements: - module.fail_json(msg="Passed input '%s' is " - "potentially dangerous" % ', '.join(dangerous_elements)) + module.fail_json(msg=f"Passed input '{', '.join(dangerous_elements)}' is potentially dangerous") diff --git a/plugins/module_utils/datetime.py b/plugins/module_utils/datetime.py new file mode 100644 index 0000000000..f11375f0eb --- /dev/null +++ b/plugins/module_utils/datetime.py @@ -0,0 +1,30 @@ +# +# Copyright (c) 2023 Felix Fontein +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause + +from __future__ import annotations + +import datetime as _datetime +import sys + + +_USE_TIMEZONE = sys.version_info >= (3, 6) + + +def ensure_timezone_info(value): + if not _USE_TIMEZONE or value.tzinfo is not None: + return value + return value.astimezone(_datetime.timezone.utc) + + +def fromtimestamp(value): + if _USE_TIMEZONE: + return _datetime.datetime.fromtimestamp(value, tz=_datetime.timezone.utc) + return _datetime.datetime.utcfromtimestamp(value) + + +def now(): + if _USE_TIMEZONE: + return _datetime.datetime.now(tz=_datetime.timezone.utc) + return _datetime.datetime.utcnow() diff --git a/plugins/module_utils/deps.py b/plugins/module_utils/deps.py index bfb94cbc09..a24cd63838 100644 --- a/plugins/module_utils/deps.py +++ b/plugins/module_utils/deps.py @@ -1,11 +1,9 @@ -# -*- coding: utf-8 -*- # (c) 2022, Alexei Znamensky # 
Copyright (c) 2022, Ansible Project # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import traceback @@ -50,12 +48,12 @@ class _Dependency(object): def failed(self): return self.state == 1 - def verify(self, module): + def validate(self, module): if self.failed: module.fail_json(msg=self.message, exception=self.trace) def __str__(self): - return "".format(self.name, self._states[self.state]) + return f"" @contextmanager @@ -71,20 +69,32 @@ def declare(name, *args, **kwargs): _deps[name] = dep -def validate(module, spec=None): +def _select_names(spec): dep_names = sorted(_deps) - if spec is not None: + if spec: if spec.startswith("-"): spec_split = spec[1:].split(":") for d in spec_split: dep_names.remove(d) else: - spec_split = spec[1:].split(":") + spec_split = spec.split(":") dep_names = [] for d in spec_split: _deps[d] # ensure it exists dep_names.append(d) - for dep in dep_names: - _deps[dep].verify(module) + return dep_names + + +def validate(module, spec=None): + for dep in _select_names(spec): + _deps[dep].validate(module) + + +def failed(spec=None): + return any(_deps[d].failed for d in _select_names(spec)) + + +def clear(): + _deps.clear() diff --git a/plugins/module_utils/dimensiondata.py b/plugins/module_utils/dimensiondata.py index 308615bfe4..a0430b445e 100644 --- a/plugins/module_utils/dimensiondata.py +++ b/plugins/module_utils/dimensiondata.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2016 Dimension Data # @@ -12,22 +11,22 @@ # # Common functionality to be used by various module components -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations +import configparser import os import re import traceback -from ansible.module_utils.basic import 
AnsibleModule, missing_required_lib -from ansible.module_utils.six.moves import configparser +# (TODO: remove AnsibleModule from next line!) +from ansible.module_utils.basic import AnsibleModule, missing_required_lib # noqa: F401, pylint: disable=unused-import from os.path import expanduser from uuid import UUID LIBCLOUD_IMP_ERR = None try: - from libcloud.common.dimensiondata import API_ENDPOINTS, DimensionDataAPIException, DimensionDataStatus - from libcloud.compute.base import Node, NodeLocation + from libcloud.common.dimensiondata import API_ENDPOINTS, DimensionDataAPIException, DimensionDataStatus # noqa: F401, pylint: disable=unused-import + from libcloud.compute.base import Node, NodeLocation # noqa: F401, pylint: disable=unused-import from libcloud.compute.providers import get_driver from libcloud.compute.types import Provider @@ -38,7 +37,7 @@ except ImportError: LIBCLOUD_IMP_ERR = traceback.format_exc() HAS_LIBCLOUD = False -# MCP 2.x version patten for location (datacenter) names. +# MCP 2.x version pattern for location (datacenter) names. # # Note that this is not a totally reliable way of determining MCP version. # Unfortunately, libcloud's NodeLocation currently makes no provision for extended properties. @@ -74,7 +73,7 @@ class DimensionDataModule(object): # Region and location are common to all Dimension Data modules. 
region = self.module.params['region'] - self.region = 'dd-{0}'.format(region) + self.region = f'dd-{region}' self.location = self.module.params['location'] libcloud.security.VERIFY_SSL_CERT = self.module.params['validate_certs'] @@ -141,7 +140,7 @@ class DimensionDataModule(object): if not user_id or not key: home = expanduser('~') config = configparser.RawConfigParser() - config.read("%s/.dimensiondata" % home) + config.read(f"{home}/.dimensiondata") try: user_id = config.get("dimensiondatacloud", "MCP_USER") @@ -191,7 +190,7 @@ class DimensionDataModule(object): if network_domain: return network_domain - raise UnknownNetworkError("Network '%s' could not be found" % locator) + raise UnknownNetworkError(f"Network '{locator}' could not be found") def get_vlan(self, locator, location, network_domain): """ @@ -213,7 +212,7 @@ class DimensionDataModule(object): if vlan: return vlan - raise UnknownVLANError("VLAN '%s' could not be found" % locator) + raise UnknownVLANError(f"VLAN '{locator}' could not be found") @staticmethod def argument_spec(**additional_argument_spec): diff --git a/plugins/module_utils/django.py b/plugins/module_utils/django.py new file mode 100644 index 0000000000..4c052a1d6e --- /dev/null +++ b/plugins/module_utils/django.py @@ -0,0 +1,150 @@ +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +from ansible.module_utils.common.dict_transformations import dict_merge +from ansible_collections.community.general.plugins.module_utils import cmd_runner_fmt +from ansible_collections.community.general.plugins.module_utils.python_runner import PythonRunner +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper + + +django_std_args = dict( + # environmental options + venv=dict(type="path"), + # default options of django-admin + 
settings=dict(type="str", required=True), + pythonpath=dict(type="path"), + traceback=dict(type="bool"), + verbosity=dict(type="int", choices=[0, 1, 2, 3]), + skip_checks=dict(type="bool"), +) +_database_dash = dict( + database=dict(type="str", default="default"), +) +_data = dict( + excludes=dict(type="list", elements="str"), + format=dict(type="str", default="json", choices=["xml", "json", "jsonl", "yaml"]), +) +_pks = dict( + primary_keys=dict(type="list", elements="str"), +) + +_django_std_arg_fmts = dict( + all=cmd_runner_fmt.as_bool("--all"), + app=cmd_runner_fmt.as_opt_val("--app"), + apps=cmd_runner_fmt.as_list(), + apps_models=cmd_runner_fmt.as_list(), + check=cmd_runner_fmt.as_bool("--check"), + command=cmd_runner_fmt.as_list(), + database_dash=cmd_runner_fmt.as_opt_eq_val("--database"), + database_stacked_dash=cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val)("--database"), + deploy=cmd_runner_fmt.as_bool("--deploy"), + dry_run=cmd_runner_fmt.as_bool("--dry-run"), + excludes=cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val)("--exclude"), + fail_level=cmd_runner_fmt.as_opt_val("--fail-level"), + fixture=cmd_runner_fmt.as_opt_val("--output"), + fixtures=cmd_runner_fmt.as_list(), + format=cmd_runner_fmt.as_opt_val("--format"), + ignore_non_existent=cmd_runner_fmt.as_bool("--ignorenonexistent"), + indent=cmd_runner_fmt.as_opt_val("--indent"), + natural_foreign=cmd_runner_fmt.as_bool("--natural-foreign"), + natural_primary=cmd_runner_fmt.as_bool("--natural-primary"), + no_color=cmd_runner_fmt.as_fixed("--no-color"), + noinput=cmd_runner_fmt.as_fixed("--noinput"), + primary_keys=lambda v: ["--pks", ",".join(v)], + pythonpath=cmd_runner_fmt.as_opt_eq_val("--pythonpath"), + settings=cmd_runner_fmt.as_opt_eq_val("--settings"), + skip_checks=cmd_runner_fmt.as_bool("--skip-checks"), + tags=cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val)("--tag"), + traceback=cmd_runner_fmt.as_bool("--traceback"), + verbosity=cmd_runner_fmt.as_opt_val("--verbosity"), + 
version=cmd_runner_fmt.as_fixed("--version"), +) + +# keys can be used in _django_args +_args_menu = dict( + std=(django_std_args, _django_std_arg_fmts), + database=(_database_dash, {"database": _django_std_arg_fmts["database_dash"]}), # deprecate, remove in 13.0.0 + noinput=({}, {"noinput": cmd_runner_fmt.as_fixed("--noinput")}), # deprecate, remove in 13.0.0 + dry_run=({}, {"dry_run": cmd_runner_fmt.as_bool("--dry-run")}), # deprecate, remove in 13.0.0 + check=({}, {"check": cmd_runner_fmt.as_bool("--check")}), # deprecate, remove in 13.0.0 + database_dash=(_database_dash, {}), + data=(_data, {}), +) + + +class _DjangoRunner(PythonRunner): + def __init__(self, module, arg_formats=None, **kwargs): + arg_fmts = dict(arg_formats) if arg_formats else {} + arg_fmts.update(_django_std_arg_fmts) + + super(_DjangoRunner, self).__init__(module, ["-m", "django"], arg_formats=arg_fmts, **kwargs) + + def __call__(self, output_process=None, check_mode_skip=False, check_mode_return=None, **kwargs): + args_order = ( + ("command", "no_color", "settings", "pythonpath", "traceback", "verbosity", "skip_checks") + self._prepare_args_order(self.default_args_order) + ) + return super(_DjangoRunner, self).__call__(args_order, output_process, check_mode_skip=check_mode_skip, check_mode_return=check_mode_return, **kwargs) + + def bare_context(self, *args, **kwargs): + return super(_DjangoRunner, self).__call__(*args, **kwargs) + + +class DjangoModuleHelper(ModuleHelper): + module = {} + django_admin_cmd = None + arg_formats = {} + django_admin_arg_order = () + _django_args = [] + _check_mode_arg = "" + + def __init__(self): + self.module["argument_spec"], self.arg_formats = self._build_args(self.module.get("argument_spec", {}), + self.arg_formats, + *(["std"] + self._django_args)) + super(DjangoModuleHelper, self).__init__(self.module) + if self.django_admin_cmd is not None: + self.vars.command = self.django_admin_cmd + + @staticmethod + def _build_args(arg_spec, arg_format, *names): + 
res_arg_spec = {} + res_arg_fmts = {} + for name in names: + args, fmts = _args_menu[name] + res_arg_spec = dict_merge(res_arg_spec, args) + res_arg_fmts = dict_merge(res_arg_fmts, fmts) + res_arg_spec = dict_merge(res_arg_spec, arg_spec) + res_arg_fmts = dict_merge(res_arg_fmts, arg_format) + + return res_arg_spec, res_arg_fmts + + def __run__(self): + runner = _DjangoRunner(self.module, + default_args_order=self.django_admin_arg_order, + arg_formats=self.arg_formats, + venv=self.vars.venv, + check_rc=True) + + run_params = self.vars.as_dict() + if self._check_mode_arg: + run_params.update({self._check_mode_arg: self.check_mode}) + + rc, out, err = runner.bare_context("version").run() + self.vars.version = out.strip() + + with runner() as ctx: + results = ctx.run(**run_params) + self.vars.stdout = ctx.results_out + self.vars.stderr = ctx.results_err + self.vars.cmd = ctx.cmd + self.vars.set("run_info", ctx.run_info, verbosity=3) + + return results + + @classmethod + def execute(cls): + cls().run() diff --git a/plugins/module_utils/gandi_livedns_api.py b/plugins/module_utils/gandi_livedns_api.py index 53245d44d0..135fc6188c 100644 --- a/plugins/module_utils/gandi_livedns_api.py +++ b/plugins/module_utils/gandi_livedns_api.py @@ -1,14 +1,12 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2019 Gregory Thiemonge # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import json -from ansible.module_utils.common.text.converters import to_native, to_text +from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.urls import fetch_url @@ -33,6 +31,7 @@ class GandiLiveDNSAPI(object): def __init__(self, module): self.module = module self.api_key = module.params['api_key'] + self.personal_access_token = 
module.params['personal_access_token'] def _build_error_message(self, module, info): s = '' @@ -43,21 +42,26 @@ class GandiLiveDNSAPI(object): error = errors[0] name = error.get('name') if name: - s += '{0} :'.format(name) + s += f'{name} :' description = error.get('description') if description: s += description return s def _gandi_api_call(self, api_call, method='GET', payload=None, error_on_404=True): - headers = {'Authorization': 'Apikey {0}'.format(self.api_key), + authorization_header = ( + f'Bearer {self.personal_access_token}' + if self.personal_access_token + else f'Apikey {self.api_key}' + ) + headers = {'Authorization': authorization_header, 'Content-Type': 'application/json'} data = None if payload: try: data = json.dumps(payload) except Exception as e: - self.module.fail_json(msg="Failed to encode payload as JSON: %s " % to_native(e)) + self.module.fail_json(msg=f"Failed to encode payload as JSON: {e} ") resp, info = fetch_url(self.module, self.api_endpoint + api_call, @@ -69,7 +73,7 @@ class GandiLiveDNSAPI(object): if info['status'] >= 400 and (info['status'] != 404 or error_on_404): err_s = self.error_strings.get(info['status'], '') - error_msg = "API Error {0}: {1}".format(err_s, self._build_error_message(self.module, info)) + error_msg = f"API Error {err_s}: {self._build_error_message(self.module, info)}" result = None try: @@ -81,7 +85,7 @@ class GandiLiveDNSAPI(object): try: result = json.loads(to_text(content, errors='surrogate_or_strict')) except (getattr(json, 'JSONDecodeError', ValueError)) as e: - error_msg += "; Failed to parse API response with error {0}: {1}".format(to_native(e), content) + error_msg += f"; Failed to parse API response with error {e}: {content}" if error_msg: self.module.fail_json(msg=error_msg) @@ -110,11 +114,11 @@ class GandiLiveDNSAPI(object): return [self.build_result(r, domain) for r in results] def get_records(self, record, type, domain): - url = '/domains/%s/records' % (domain) + url = f'/domains/{domain}/records' 
if record: - url += '/%s' % (record) + url += f'/{record}' if type: - url += '/%s' % (type) + url += f'/{type}' records, status = self._gandi_api_call(url, error_on_404=False) @@ -133,7 +137,7 @@ class GandiLiveDNSAPI(object): return records def create_record(self, record, type, values, ttl, domain): - url = '/domains/%s/records' % (domain) + url = f'/domains/{domain}/records' new_record = { 'rrset_name': record, 'rrset_type': type, @@ -148,7 +152,7 @@ class GandiLiveDNSAPI(object): return None def update_record(self, record, type, values, ttl, domain): - url = '/domains/%s/records/%s/%s' % (domain, record, type) + url = f'/domains/{domain}/records/{record}/{type}' new_record = { 'rrset_values': values, 'rrset_ttl': ttl, @@ -157,7 +161,7 @@ class GandiLiveDNSAPI(object): return record def delete_record(self, record, type, domain): - url = '/domains/%s/records/%s/%s' % (domain, record, type) + url = f'/domains/{domain}/records/{record}/{type}' self._gandi_api_call(url, method='DELETE') diff --git a/plugins/module_utils/gconftool2.py b/plugins/module_utils/gconftool2.py index e90c3fb2cb..7d11078edf 100644 --- a/plugins/module_utils/gconftool2.py +++ b/plugins/module_utils/gconftool2.py @@ -1,10 +1,8 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2022, Alexei Znamensky # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt @@ -27,6 +25,7 @@ def gconftool2_runner(module, **kwargs): value=cmd_runner_fmt.as_list(), direct=cmd_runner_fmt.as_bool("--direct"), config_source=cmd_runner_fmt.as_opt_val("--config-source"), + version=cmd_runner_fmt.as_fixed("--version"), ), **kwargs ) diff --git a/plugins/module_utils/gio_mime.py 
b/plugins/module_utils/gio_mime.py new file mode 100644 index 0000000000..15122b1ef1 --- /dev/null +++ b/plugins/module_utils/gio_mime.py @@ -0,0 +1,32 @@ +# Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +def gio_mime_runner(module, **kwargs): + return CmdRunner( + module, + command=['gio'], + arg_formats=dict( + mime=cmd_runner_fmt.as_fixed('mime'), + mime_type=cmd_runner_fmt.as_list(), + handler=cmd_runner_fmt.as_list(), + version=cmd_runner_fmt.as_fixed('--version'), + ), + **kwargs + ) + + +def gio_mime_get(runner, mime_type): + def process(rc, out, err): + if err.startswith("No default applications for"): + return None + out = out.splitlines()[0] + return out.split()[-1] + + with runner("mime mime_type", output_process=process) as ctx: + return ctx.run(mime_type=mime_type) diff --git a/plugins/module_utils/gitlab.py b/plugins/module_utils/gitlab.py index 7cb59e4c2c..7ad11ab5a2 100644 --- a/plugins/module_utils/gitlab.py +++ b/plugins/module_utils/gitlab.py @@ -1,38 +1,48 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) # Copyright (c) 2018, Marcus Watkins # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from ansible.module_utils.basic import missing_required_lib -from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.version import LooseVersion -try: - from urlparse import urljoin -except ImportError: - from 
urllib.parse import urljoin # Python 3+ +from urllib.parse import urljoin import traceback + +def _determine_list_all_kwargs(version): + gitlab_version = LooseVersion(version) + if gitlab_version >= LooseVersion('4.0.0'): + # 4.0.0 removed 'as_list' + return {'iterator': True, 'per_page': 100} + elif gitlab_version >= LooseVersion('3.7.0'): + # 3.7.0 added 'get_all' + return {'as_list': False, 'get_all': True, 'per_page': 100} + else: + return {'as_list': False, 'all': True, 'per_page': 100} + + GITLAB_IMP_ERR = None try: import gitlab import requests HAS_GITLAB_PACKAGE = True + list_all_kwargs = _determine_list_all_kwargs(gitlab.__version__) except Exception: gitlab = None GITLAB_IMP_ERR = traceback.format_exc() HAS_GITLAB_PACKAGE = False + list_all_kwargs = {} def auth_argument_spec(spec=None): arg_spec = (dict( + ca_path=dict(type='str'), api_token=dict(type='str', no_log=True), api_oauth_token=dict(type='str', no_log=True), api_job_token=dict(type='str', no_log=True), @@ -48,7 +58,7 @@ def find_project(gitlab_instance, identifier): except Exception as e: current_user = gitlab_instance.user try: - project = gitlab_instance.projects.get(current_user.username + '/' + identifier) + project = gitlab_instance.projects.get(f"{current_user.username}/{identifier}") except Exception as e: return None @@ -57,57 +67,60 @@ def find_project(gitlab_instance, identifier): def find_group(gitlab_instance, identifier): try: - project = gitlab_instance.groups.get(identifier) + group = gitlab_instance.groups.get(identifier) except Exception as e: return None - return project + return group -def ensure_gitlab_package(module): +def ensure_gitlab_package(module, min_version=None): if not HAS_GITLAB_PACKAGE: module.fail_json( msg=missing_required_lib("python-gitlab", url='https://python-gitlab.readthedocs.io/en/stable/'), exception=GITLAB_IMP_ERR ) + gitlab_version = gitlab.__version__ + if min_version is not None and LooseVersion(gitlab_version) < LooseVersion(min_version): + 
module.fail_json(msg=( + f"This module requires python-gitlab Python module >= {min_version} (installed version: " + f"{gitlab_version}). Please upgrade python-gitlab to version {min_version} or above." + )) -def gitlab_authentication(module): +def gitlab_authentication(module, min_version=None): + ensure_gitlab_package(module, min_version=min_version) + gitlab_url = module.params['api_url'] validate_certs = module.params['validate_certs'] + ca_path = module.params['ca_path'] gitlab_user = module.params['api_username'] gitlab_password = module.params['api_password'] gitlab_token = module.params['api_token'] gitlab_oauth_token = module.params['api_oauth_token'] gitlab_job_token = module.params['api_job_token'] - ensure_gitlab_package(module) + verify = ca_path if validate_certs and ca_path else validate_certs try: - # python-gitlab library remove support for username/password authentication since 1.13.0 - # Changelog : https://github.com/python-gitlab/python-gitlab/releases/tag/v1.13.0 - # This condition allow to still support older version of the python-gitlab library - if LooseVersion(gitlab.__version__) < LooseVersion("1.13.0"): - gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, email=gitlab_user, password=gitlab_password, - private_token=gitlab_token, api_version=4) - else: - # We can create an oauth_token using a username and password - # https://docs.gitlab.com/ee/api/oauth2.html#authorization-code-flow - if gitlab_user: - data = {'grant_type': 'password', 'username': gitlab_user, 'password': gitlab_password} - resp = requests.post(urljoin(gitlab_url, "oauth/token"), data=data, verify=validate_certs) - resp_data = resp.json() - gitlab_oauth_token = resp_data["access_token"] - - gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, private_token=gitlab_token, - oauth_token=gitlab_oauth_token, job_token=gitlab_job_token, api_version=4) + # We can create an oauth_token using a username and password + # 
https://docs.gitlab.com/ee/api/oauth2.html#authorization-code-flow + if gitlab_user: + data = {'grant_type': 'password', 'username': gitlab_user, 'password': gitlab_password} + resp = requests.post(urljoin(gitlab_url, "oauth/token"), data=data, verify=verify) + resp_data = resp.json() + gitlab_oauth_token = resp_data["access_token"] + gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=verify, private_token=gitlab_token, + oauth_token=gitlab_oauth_token, job_token=gitlab_job_token, api_version=4) gitlab_instance.auth() except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e: - module.fail_json(msg="Failed to connect to GitLab server: %s" % to_native(e)) + module.fail_json(msg=f"Failed to connect to GitLab server: {e}") except (gitlab.exceptions.GitlabHttpError) as e: - module.fail_json(msg="Failed to connect to GitLab server: %s. \ - GitLab remove Session API now that private tokens are removed from user API endpoints since version 10.2." % to_native(e)) + module.fail_json(msg=( + f"Failed to connect to GitLab server: {e}. GitLab remove Session API now " + "that private tokens are removed from user API endpoints since version 10.2." 
+ )) return gitlab_instance @@ -115,9 +128,50 @@ def gitlab_authentication(module): def filter_returned_variables(gitlab_variables): # pop properties we don't know existing_variables = [dict(x.attributes) for x in gitlab_variables] - KNOWN = ['key', 'value', 'masked', 'protected', 'variable_type', 'environment_scope'] + KNOWN = ['key', 'value', 'description', 'masked', 'hidden', 'protected', 'variable_type', 'environment_scope', 'raw'] for item in existing_variables: for key in list(item.keys()): if key not in KNOWN: item.pop(key) return existing_variables + + +def vars_to_variables(vars, module): + # transform old vars to new variables structure + variables = list() + for item, value in vars.items(): + if isinstance(value, (str, int, float)): + variables.append( + { + "name": item, + "value": str(value), + "description": None, + "masked": False, + "protected": False, + "hidden": False, + "raw": False, + "variable_type": "env_var", + } + ) + + elif isinstance(value, dict): + new_item = { + "name": item, + "value": value.get('value'), + "description": value.get('description'), + "masked": value.get('masked'), + "hidden": value.get('hidden'), + "protected": value.get('protected'), + "raw": value.get('raw'), + "variable_type": value.get('variable_type'), + } + + if value.get('environment_scope'): + new_item['environment_scope'] = value.get('environment_scope') + + variables.append(new_item) + + else: + module.fail_json(msg="value must be of type string, integer, float or dict") + + return variables diff --git a/plugins/module_utils/heroku.py b/plugins/module_utils/heroku.py index f5ed3e2b89..149e11162e 100644 --- a/plugins/module_utils/heroku.py +++ b/plugins/module_utils/heroku.py @@ -1,10 +1,8 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Ansible Project # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function 
-__metaclass__ = type +from __future__ import annotations import traceback diff --git a/plugins/module_utils/homebrew.py b/plugins/module_utils/homebrew.py new file mode 100644 index 0000000000..88e92461c3 --- /dev/null +++ b/plugins/module_utils/homebrew.py @@ -0,0 +1,135 @@ +# Copyright (c) Ansible project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause + +from __future__ import annotations + + +import os +import re + + +def _create_regex_group_complement(s): + lines = (line.strip() for line in s.split("\n") if line.strip()) + chars = [_f for _f in (line.split("#")[0].strip() for line in lines) if _f] + group = rf"[^{''.join(chars)}]" + return re.compile(group) + + +class HomebrewValidate(object): + # class regexes ------------------------------------------------ {{{ + VALID_PATH_CHARS = rf""" + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + \s # spaces + : # colons + {os.path.sep} # the OS-specific path separator + . # dots + \- # dashes + """ + + VALID_BREW_PATH_CHARS = rf""" + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + \s # spaces + {os.path.sep} # the OS-specific path separator + . # dots + \- # dashes + """ + + VALID_PACKAGE_CHARS = r""" + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + . 
# dots + / # slash (for taps) + \+ # plusses + \- # dashes + : # colons (for URLs) + @ # at-sign + """ + + INVALID_PATH_REGEX = _create_regex_group_complement(VALID_PATH_CHARS) + INVALID_BREW_PATH_REGEX = _create_regex_group_complement(VALID_BREW_PATH_CHARS) + INVALID_PACKAGE_REGEX = _create_regex_group_complement(VALID_PACKAGE_CHARS) + # /class regexes ----------------------------------------------- }}} + + # class validations -------------------------------------------- {{{ + @classmethod + def valid_path(cls, path): + """ + `path` must be one of: + - list of paths + - a string containing only: + - alphanumeric characters + - dashes + - dots + - spaces + - colons + - os.path.sep + """ + + if isinstance(path, str): + return not cls.INVALID_PATH_REGEX.search(path) + + try: + iter(path) + except TypeError: + return False + else: + paths = path + return all(cls.valid_brew_path(path_) for path_ in paths) + + @classmethod + def valid_brew_path(cls, brew_path): + """ + `brew_path` must be one of: + - None + - a string containing only: + - alphanumeric characters + - dashes + - dots + - spaces + - os.path.sep + """ + + if brew_path is None: + return True + + return isinstance( + brew_path, str + ) and not cls.INVALID_BREW_PATH_REGEX.search(brew_path) + + @classmethod + def valid_package(cls, package): + """A valid package is either None or alphanumeric.""" + + if package is None: + return True + + return isinstance( + package, str + ) and not cls.INVALID_PACKAGE_REGEX.search(package) + + +def parse_brew_path(module): + # type: (...) -> str + """Attempt to find the Homebrew executable path. + + Requires: + - module has a `path` parameter + - path is a valid path string for the target OS. Otherwise, module.fail_json() + is called with msg="Invalid_path: ". 
+ """ + path = module.params["path"] + if not HomebrewValidate.valid_path(path): + module.fail_json(msg=f"Invalid path: {path}") + + if isinstance(path, str): + paths = path.split(":") + elif isinstance(path, list): + paths = path + else: + module.fail_json(msg=f"Invalid path: {path}") + + brew_path = module.get_bin_path("brew", required=True, opt_dirs=paths) + if not HomebrewValidate.valid_brew_path(brew_path): + module.fail_json(msg=f"Invalid brew path: {brew_path}") + + return brew_path diff --git a/plugins/module_utils/hwc_utils.py b/plugins/module_utils/hwc_utils.py index a21cc8e48f..dee53cd787 100644 --- a/plugins/module_utils/hwc_utils.py +++ b/plugins/module_utils/hwc_utils.py @@ -1,10 +1,8 @@ -# -*- coding: utf-8 -*- # Copyright (c), Google Inc, 2017 # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import re import time @@ -32,7 +30,7 @@ class HwcModuleException(Exception): self._message = message def __str__(self): - return "[HwcClientException] message=%s" % self._message + return f"[HwcClientException] message={self._message}" class HwcClientException(Exception): @@ -43,9 +41,8 @@ class HwcClientException(Exception): self._message = message def __str__(self): - msg = " code=%s," % str(self._code) if self._code != 0 else "" - return "[HwcClientException]%s message=%s" % ( - msg, self._message) + msg = f" code={self._code!s}," if self._code != 0 else "" + return f"[HwcClientException]{msg} message={self._message}" class HwcClientException404(HwcClientException): @@ -53,7 +50,7 @@ class HwcClientException404(HwcClientException): super(HwcClientException404, self).__init__(404, message) def __str__(self): - return "[HwcClientException404] message=%s" % self._message + return f"[HwcClientException404] message={self._message}" def 
session_method_wrapper(f): @@ -63,7 +60,7 @@ def session_method_wrapper(f): r = f(self, url, *args, **kwargs) except Exception as ex: raise HwcClientException( - 0, "Sending request failed, error=%s" % ex) + 0, f"Sending request failed, error={ex}") result = None if r.content: @@ -71,7 +68,7 @@ def session_method_wrapper(f): result = r.json() except Exception as ex: raise HwcClientException( - 0, "Parsing response to json failed, error: %s" % ex) + 0, f"Parsing response to json failed, error: {ex}") code = r.status_code if code not in [200, 201, 202, 203, 204, 205, 206, 207, 208, 226]: @@ -100,7 +97,7 @@ class _ServiceClient(object): self._client = client self._endpoint = endpoint self._default_header = { - 'User-Agent': "Huawei-Ansible-MM-%s" % product, + 'User-Agent': f"Huawei-Ansible-MM-{product}", 'Accept': 'application/json', } @@ -188,7 +185,7 @@ class Config(object): raise_exc=False) def _get_service_endpoint(self, client, service_type, region): - k = "%s.%s" % (service_type, region if region else "") + k = f"{service_type}.{region if region else ''}" if k in self._endpoints: return self._endpoints.get(k) @@ -199,11 +196,11 @@ class Config(object): region_name=region, interface="public") except Exception as ex: raise HwcClientException( - 0, "Getting endpoint failed, error=%s" % ex) + 0, f"Getting endpoint failed, error={ex}") if url == "": raise HwcClientException( - 0, "Can not find the enpoint for %s" % service_type) + 0, f"Cannot find the endpoint for {service_type}") if url[-1] != "/": url += "/" @@ -342,7 +339,7 @@ def wait_to_finish(target, pending, refresh, timeout, min_interval=1, delay=3): if not_found_times > 10: raise HwcModuleException( - "not found the object for %d times" % not_found_times) + f"not found the object for {not_found_times} times") else: not_found_times = 0 @@ -351,7 +348,7 @@ def wait_to_finish(target, pending, refresh, timeout, min_interval=1, delay=3): if pending and status not in pending: raise HwcModuleException( - "unexpect 
status(%s) occurred" % status) + f"unexpected status({status}) occurred") if not is_last_time: wait *= 2 @@ -362,7 +359,7 @@ def wait_to_finish(target, pending, refresh, timeout, min_interval=1, delay=3): time.sleep(wait) - raise HwcModuleException("asycn wait timeout after %d seconds" % timeout) + raise HwcModuleException(f"async wait timeout after {timeout} seconds") def navigate_value(data, index, array_index=None): @@ -381,7 +378,7 @@ def navigate_value(data, index, array_index=None): i = index[n] if i not in d: raise HwcModuleException( - "navigate value failed: key(%s) is not exist in dict" % i) + f"navigate value failed: key({i}) is not exist in dict") d = d[i] if not array_index: diff --git a/plugins/module_utils/ibm_sa_utils.py b/plugins/module_utils/ibm_sa_utils.py index abbb57f520..0c8f3d274d 100644 --- a/plugins/module_utils/ibm_sa_utils.py +++ b/plugins/module_utils/ibm_sa_utils.py @@ -1,12 +1,10 @@ -# -*- coding: utf-8 -*- # Copyright (C) 2018 IBM CORPORATION # Author(s): Tzur Eliyahu # # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import traceback diff --git a/plugins/module_utils/identity/keycloak/keycloak.py b/plugins/module_utils/identity/keycloak/keycloak.py index 09b22b7561..ad07f27b1b 100644 --- a/plugins/module_utils/identity/keycloak/keycloak.py +++ b/plugins/module_utils/identity/keycloak/keycloak.py @@ -1,23 +1,23 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017, Eike Frost # BSD 2-Clause license (see LICENSES/BSD-2-Clause.txt) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type import json import traceback +import copy +from urllib.parse import urlencode, quote +from urllib.error import HTTPError from 
ansible.module_utils.urls import open_url -from ansible.module_utils.six.moves.urllib.parse import urlencode, quote -from ansible.module_utils.six.moves.urllib.error import HTTPError from ansible.module_utils.common.text.converters import to_native, to_text URL_REALM_INFO = "{url}/realms/{realm}" URL_REALMS = "{url}/admin/realms" URL_REALM = "{url}/admin/realms/{realm}" +URL_REALM_KEYS_METADATA = "{url}/admin/realms/{realm}/keys" URL_TOKEN = "{url}/realms/{realm}/protocol/openid-connect/token" URL_CLIENT = "{url}/admin/realms/{realm}/clients/{id}" @@ -27,6 +27,9 @@ URL_CLIENT_ROLES = "{url}/admin/realms/{realm}/clients/{id}/roles" URL_CLIENT_ROLE = "{url}/admin/realms/{realm}/clients/{id}/roles/{name}" URL_CLIENT_ROLE_COMPOSITES = "{url}/admin/realms/{realm}/clients/{id}/roles/{name}/composites" +URL_CLIENT_ROLE_SCOPE_CLIENTS = "{url}/admin/realms/{realm}/clients/{id}/scope-mappings/clients/{scopeid}" +URL_CLIENT_ROLE_SCOPE_REALM = "{url}/admin/realms/{realm}/clients/{id}/scope-mappings/realm" + URL_REALM_ROLES = "{url}/admin/realms/{realm}/roles" URL_REALM_ROLE = "{url}/admin/realms/{realm}/roles/{name}" URL_REALM_ROLEMAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings/realm" @@ -42,22 +45,43 @@ URL_CLIENTTEMPLATE = "{url}/admin/realms/{realm}/client-templates/{id}" URL_CLIENTTEMPLATES = "{url}/admin/realms/{realm}/client-templates" URL_GROUPS = "{url}/admin/realms/{realm}/groups" URL_GROUP = "{url}/admin/realms/{realm}/groups/{groupid}" +URL_GROUP_CHILDREN = "{url}/admin/realms/{realm}/groups/{groupid}/children" URL_CLIENTSCOPES = "{url}/admin/realms/{realm}/client-scopes" URL_CLIENTSCOPE = "{url}/admin/realms/{realm}/client-scopes/{id}" URL_CLIENTSCOPE_PROTOCOLMAPPERS = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models" URL_CLIENTSCOPE_PROTOCOLMAPPER = "{url}/admin/realms/{realm}/client-scopes/{id}/protocol-mappers/models/{mapper_id}" +URL_DEFAULT_CLIENTSCOPES = "{url}/admin/realms/{realm}/default-default-client-scopes" 
+URL_DEFAULT_CLIENTSCOPE = "{url}/admin/realms/{realm}/default-default-client-scopes/{id}" +URL_OPTIONAL_CLIENTSCOPES = "{url}/admin/realms/{realm}/default-optional-client-scopes" +URL_OPTIONAL_CLIENTSCOPE = "{url}/admin/realms/{realm}/default-optional-client-scopes/{id}" + +URL_CLIENT_DEFAULT_CLIENTSCOPES = "{url}/admin/realms/{realm}/clients/{cid}/default-client-scopes" +URL_CLIENT_DEFAULT_CLIENTSCOPE = "{url}/admin/realms/{realm}/clients/{cid}/default-client-scopes/{id}" +URL_CLIENT_OPTIONAL_CLIENTSCOPES = "{url}/admin/realms/{realm}/clients/{cid}/optional-client-scopes" +URL_CLIENT_OPTIONAL_CLIENTSCOPE = "{url}/admin/realms/{realm}/clients/{cid}/optional-client-scopes/{id}" + URL_CLIENT_GROUP_ROLEMAPPINGS = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}" URL_CLIENT_GROUP_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/available" URL_CLIENT_GROUP_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/groups/{id}/role-mappings/clients/{client}/composite" URL_USERS = "{url}/admin/realms/{realm}/users" +URL_USER = "{url}/admin/realms/{realm}/users/{id}" +URL_USER_ROLE_MAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings" +URL_USER_REALM_ROLE_MAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings/realm" +URL_USER_CLIENTS_ROLE_MAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients" +URL_USER_CLIENT_ROLE_MAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client_id}" +URL_USER_GROUPS = "{url}/admin/realms/{realm}/users/{id}/groups" +URL_USER_GROUP = "{url}/admin/realms/{realm}/users/{id}/groups/{group_id}" + URL_CLIENT_SERVICE_ACCOUNT_USER = "{url}/admin/realms/{realm}/clients/{id}/service-account-user" URL_CLIENT_USER_ROLEMAPPINGS = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}" URL_CLIENT_USER_ROLEMAPPINGS_AVAILABLE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}/available" 
URL_CLIENT_USER_ROLEMAPPINGS_COMPOSITE = "{url}/admin/realms/{realm}/users/{id}/role-mappings/clients/{client}/composite" +URL_REALM_GROUP_ROLEMAPPINGS = "{url}/admin/realms/{realm}/groups/{group}/role-mappings/realm" + URL_CLIENTSECRET = "{url}/admin/realms/{realm}/clients/{id}/client-secret" URL_AUTHENTICATION_FLOWS = "{url}/admin/realms/{realm}/authentication/flows" @@ -70,15 +94,36 @@ URL_AUTHENTICATION_EXECUTION_CONFIG = "{url}/admin/realms/{realm}/authentication URL_AUTHENTICATION_EXECUTION_RAISE_PRIORITY = "{url}/admin/realms/{realm}/authentication/executions/{id}/raise-priority" URL_AUTHENTICATION_EXECUTION_LOWER_PRIORITY = "{url}/admin/realms/{realm}/authentication/executions/{id}/lower-priority" URL_AUTHENTICATION_CONFIG = "{url}/admin/realms/{realm}/authentication/config/{id}" +URL_AUTHENTICATION_REGISTER_REQUIRED_ACTION = "{url}/admin/realms/{realm}/authentication/register-required-action" +URL_AUTHENTICATION_REQUIRED_ACTIONS = "{url}/admin/realms/{realm}/authentication/required-actions" +URL_AUTHENTICATION_REQUIRED_ACTIONS_ALIAS = "{url}/admin/realms/{realm}/authentication/required-actions/{alias}" URL_IDENTITY_PROVIDERS = "{url}/admin/realms/{realm}/identity-provider/instances" URL_IDENTITY_PROVIDER = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}" URL_IDENTITY_PROVIDER_MAPPERS = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}/mappers" URL_IDENTITY_PROVIDER_MAPPER = "{url}/admin/realms/{realm}/identity-provider/instances/{alias}/mappers/{id}" +URL_IDENTITY_PROVIDER_IMPORT = "{url}/admin/realms/{realm}/identity-provider/import-config" URL_COMPONENTS = "{url}/admin/realms/{realm}/components" URL_COMPONENT = "{url}/admin/realms/{realm}/components/{id}" +URL_AUTHZ_AUTHORIZATION_SCOPE = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/scope/{id}" +URL_AUTHZ_AUTHORIZATION_SCOPES = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/scope" + +# This URL is used for: +# - Querying client 
authorization permissions +# - Removing client authorization permissions +URL_AUTHZ_POLICIES = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/policy" +URL_AUTHZ_POLICY = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/policy/{id}" + +URL_AUTHZ_PERMISSION = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/permission/{permission_type}/{id}" +URL_AUTHZ_PERMISSIONS = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/permission/{permission_type}" + +URL_AUTHZ_RESOURCES = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/resource" + +URL_AUTHZ_CUSTOM_POLICY = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/policy/{policy_type}" +URL_AUTHZ_CUSTOM_POLICIES = "{url}/admin/realms/{realm}/clients/{client_id}/authz/resource-server/policy" + def keycloak_argument_spec(): """ @@ -96,6 +141,7 @@ def keycloak_argument_spec(): validate_certs=dict(type='bool', default=True), connection_timeout=dict(type='int', default=10), token=dict(type='str', no_log=True), + refresh_token=dict(type='str', no_log=True), http_agent=dict(type='str', default='Ansible'), ) @@ -105,59 +151,142 @@ def camel(words): class KeycloakError(Exception): - pass + def __init__(self, msg, authError=None): + self.msg = msg + self.authError = authError + + def __str__(self): + return str(self.msg) + + +def _token_request(module_params, payload): + """ Obtains connection header with token for the authentication, + using the provided auth_username/auth_password + :param module_params: parameters of the module + :param payload: + type: + dict + description: + Authentication request payload. Must contain at least + 'grant_type' and 'client_id', optionally 'client_secret', + along with parameters based on 'grant_type'; e.g., + 'username'/'password' for type 'password', + 'refresh_token' for type 'refresh_token'. 
+ :return: access token + """ + base_url = module_params.get('auth_keycloak_url') + if not base_url.lower().startswith(('http', 'https')): + raise KeycloakError("auth_url '%s' should either start with 'http' or 'https'." % base_url) + auth_realm = module_params.get('auth_realm') + auth_url = URL_TOKEN.format(url=base_url, realm=auth_realm) + http_agent = module_params.get('http_agent') + validate_certs = module_params.get('validate_certs') + connection_timeout = module_params.get('connection_timeout') + + try: + r = json.loads(to_native(open_url(auth_url, method='POST', + validate_certs=validate_certs, http_agent=http_agent, timeout=connection_timeout, + data=urlencode(payload)).read())) + + return r['access_token'] + except ValueError as e: + raise KeycloakError( + 'API returned invalid JSON when trying to obtain access token from %s: %s' + % (auth_url, str(e))) + except KeyError: + raise KeycloakError( + 'API did not include access_token field in response from %s' % auth_url) + except Exception as e: + raise KeycloakError('Could not obtain access token from %s: %s' + % (auth_url, str(e)), authError=e) + + +def _request_token_using_credentials(module_params): + """ Obtains connection header with token for the authentication, + using the provided auth_username/auth_password + :param module_params: parameters of the module. Must include 'auth_username' and 'auth_password'. 
+ :return: connection header + """ + client_id = module_params.get('auth_client_id') + auth_username = module_params.get('auth_username') + auth_password = module_params.get('auth_password') + client_secret = module_params.get('auth_client_secret') + + temp_payload = { + 'grant_type': 'password', + 'client_id': client_id, + 'client_secret': client_secret, + 'username': auth_username, + 'password': auth_password, + } + # Remove empty items, for instance missing client_secret + payload = {k: v for k, v in temp_payload.items() if v is not None} + + return _token_request(module_params, payload) + + +def _request_token_using_refresh_token(module_params): + """ Obtains connection header with token for the authentication, + using the provided refresh_token + :param module_params: parameters of the module. Must include 'refresh_token'. + :return: connection header + """ + client_id = module_params.get('auth_client_id') + refresh_token = module_params.get('refresh_token') + client_secret = module_params.get('auth_client_secret') + + temp_payload = { + 'grant_type': 'refresh_token', + 'client_id': client_id, + 'client_secret': client_secret, + 'refresh_token': refresh_token, + } + # Remove empty items, for instance missing client_secret + payload = {k: v for k, v in temp_payload.items() if v is not None} + + return _token_request(module_params, payload) + + +def _request_token_using_client_credentials(module_params): + """ Obtains connection header with token for the authentication, + using the provided auth_client_id and auth_client_secret by grant_type + client_credentials. Ensure that the used client uses client authorization + with service account roles enabled and required service roles assigned. + :param module_params: parameters of the module. Must include 'auth_client_id' + and 'auth_client_secret'.. 
+ :return: connection header + """ + client_id = module_params.get('auth_client_id') + client_secret = module_params.get('auth_client_secret') + + temp_payload = { + 'grant_type': 'client_credentials', + 'client_id': client_id, + 'client_secret': client_secret, + } + # Remove empty items, for instance missing client_secret + payload = {k: v for k, v in temp_payload.items() if v is not None} + + return _token_request(module_params, payload) def get_token(module_params): """ Obtains connection header with token for the authentication, - token already given or obtained from credentials - :param module_params: parameters of the module - :return: connection header + token already given or obtained from credentials + :param module_params: parameters of the module + :return: connection header """ token = module_params.get('token') - base_url = module_params.get('auth_keycloak_url') - http_agent = module_params.get('http_agent') - - if not base_url.lower().startswith(('http', 'https')): - raise KeycloakError("auth_url '%s' should either start with 'http' or 'https'." 
% base_url) if token is None: - base_url = module_params.get('auth_keycloak_url') - validate_certs = module_params.get('validate_certs') - auth_realm = module_params.get('auth_realm') - client_id = module_params.get('auth_client_id') + auth_client_id = module_params.get('auth_client_id') + auth_client_secret = module_params.get('auth_client_secret') auth_username = module_params.get('auth_username') - auth_password = module_params.get('auth_password') - client_secret = module_params.get('auth_client_secret') - connection_timeout = module_params.get('connection_timeout') - auth_url = URL_TOKEN.format(url=base_url, realm=auth_realm) - temp_payload = { - 'grant_type': 'password', - 'client_id': client_id, - 'client_secret': client_secret, - 'username': auth_username, - 'password': auth_password, - } - # Remove empty items, for instance missing client_secret - payload = dict( - (k, v) for k, v in temp_payload.items() if v is not None) - try: - r = json.loads(to_native(open_url(auth_url, method='POST', - validate_certs=validate_certs, http_agent=http_agent, timeout=connection_timeout, - data=urlencode(payload)).read())) - except ValueError as e: - raise KeycloakError( - 'API returned invalid JSON when trying to obtain access token from %s: %s' - % (auth_url, str(e))) - except Exception as e: - raise KeycloakError('Could not obtain access token from %s: %s' - % (auth_url, str(e))) + if auth_client_id is not None and auth_client_secret is not None and auth_username is None: + token = _request_token_using_client_credentials(module_params) + else: + token = _request_token_using_credentials(module_params) - try: - token = r['access_token'] - except KeyError: - raise KeycloakError( - 'Could not obtain access token from %s' % auth_url) return { 'Authorization': 'Bearer ' + token, 'Content-Type': 'application/json' @@ -193,24 +322,30 @@ def is_struct_included(struct1, struct2, exclude=None): Return True if all element of dict 1 are present in dict 2, return false otherwise. 
""" if isinstance(struct1, list) and isinstance(struct2, list): + if not struct1 and not struct2: + return True for item1 in struct1: if isinstance(item1, (list, dict)): for item2 in struct2: - if not is_struct_included(item1, item2, exclude): - return False + if is_struct_included(item1, item2, exclude): + break + else: + return False else: if item1 not in struct2: return False return True elif isinstance(struct1, dict) and isinstance(struct2, dict): + if not struct1 and not struct2: + return True try: for key in struct1: if not (exclude and key in exclude): if not is_struct_included(struct1[key], struct2[key], exclude): return False - return True except KeyError: return False + return True elif isinstance(struct1, bool) and isinstance(struct2, bool): return struct1 == struct2 else: @@ -221,6 +356,7 @@ class KeycloakAPI(object): """ Keycloak API access; Keycloak uses OAuth 2.0 to protect its API, an access token for which is obtained through OpenID connect """ + def __init__(self, module, connection_header): self.module = module self.baseurl = self.module.params.get('auth_keycloak_url') @@ -229,6 +365,87 @@ class KeycloakAPI(object): self.restheaders = connection_header self.http_agent = self.module.params.get('http_agent') + def _request(self, url, method, data=None): + """ Makes a request to Keycloak and returns the raw response. + If a 401 is returned, attempts to re-authenticate + using first the module's refresh_token (if provided) + and then the module's username/password (if provided). + On successful re-authentication, the new token is stored + in the restheaders for future requests. + + :param url: request path + :param method: request method (e.g., 'GET', 'POST', etc.) 
+ :param data: (optional) data for request + :return: raw API response + """ + def make_request_catching_401(): + try: + return open_url(url, method=method, data=data, + http_agent=self.http_agent, headers=self.restheaders, + timeout=self.connection_timeout, + validate_certs=self.validate_certs) + except HTTPError as e: + if e.code != 401: + raise e + return e + + r = make_request_catching_401() + + if isinstance(r, Exception): + # Try to refresh token and retry, if available + refresh_token = self.module.params.get('refresh_token') + if refresh_token is not None: + try: + token = _request_token_using_refresh_token(self.module.params) + self.restheaders['Authorization'] = 'Bearer ' + token + + r = make_request_catching_401() + except KeycloakError as e: + # Token refresh returns 400 if token is expired/invalid, so continue on if we get a 400 + if e.authError is not None and e.authError.code != 400: + raise e + + if isinstance(r, Exception): + # Try to re-auth with username/password, if available + auth_username = self.module.params.get('auth_username') + auth_password = self.module.params.get('auth_password') + if auth_username is not None and auth_password is not None: + token = _request_token_using_credentials(self.module.params) + self.restheaders['Authorization'] = 'Bearer ' + token + + r = make_request_catching_401() + + if isinstance(r, Exception): + # Try to re-auth with client_id and client_secret, if available + auth_client_id = self.module.params.get('auth_client_id') + auth_client_secret = self.module.params.get('auth_client_secret') + if auth_client_id is not None and auth_client_secret is not None: + try: + token = _request_token_using_client_credentials(self.module.params) + self.restheaders['Authorization'] = 'Bearer ' + token + + r = make_request_catching_401() + except KeycloakError as e: + # Token refresh returns 400 if token is expired/invalid, so continue on if we get a 400 + if e.authError is not None and e.authError.code != 400: + raise e + + 
if isinstance(r, Exception): + # Either no re-auth options were available, or they all failed + raise r + + return r + + def _request_and_deserialize(self, url, method, data=None): + """ Wraps the _request method with JSON deserialization of the response. + + :param url: request path + :param method: request method (e.g., 'GET', 'POST', etc.) + :param data: (optional) data for request + :return: raw API response + """ + return json.loads(to_native(self._request(url, method, data).read())) + def get_realm_info_by_id(self, realm='master'): """ Obtain realm public info by id @@ -238,16 +455,14 @@ class KeycloakAPI(object): realm_info_url = URL_REALM_INFO.format(url=self.baseurl, realm=realm) try: - return json.loads(to_native(open_url(realm_info_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(realm_info_url, method='GET') except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)), - exception=traceback.format_exc()) + self.fail_request(e, msg='Could not obtain realm %s: %s' % (realm, str(e)), + exception=traceback.format_exc()) except ValueError as e: self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)), exception=traceback.format_exc()) @@ -255,6 +470,37 @@ class KeycloakAPI(object): self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)), exception=traceback.format_exc()) + def get_realm_keys_metadata_by_id(self, realm='master'): + """Obtain realm public info by id + + :param realm: realm id + + :return: None, or a 'KeysMetadataRepresentation' + (https://www.keycloak.org/docs-api/latest/rest-api/index.html#KeysMetadataRepresentation) + -- a dict containing the keys 'active' and 'keys', the former containing a mapping + from algorithms to key-ids, the latter containing a 
list of dicts with key + information. + """ + realm_keys_metadata_url = URL_REALM_KEYS_METADATA.format(url=self.baseurl, realm=realm) + + try: + return self._request_and_deserialize(realm_keys_metadata_url, method="GET") + + except HTTPError as e: + if e.code == 404: + return None + else: + self.fail_request(e, msg='Could not obtain realm %s: %s' % (realm, str(e)), + exception=traceback.format_exc()) + except ValueError as e: + self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)), + exception=traceback.format_exc()) + except Exception as e: + self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)), + exception=traceback.format_exc()) + + # The Keycloak API expects the realm name (like `master`) not the ID when fetching the realm data. + # See the Keycloak API docs: https://www.keycloak.org/docs-api/latest/rest-api/#_realms_admin def get_realm_by_id(self, realm='master'): """ Obtain realm representation by id @@ -264,15 +510,14 @@ class KeycloakAPI(object): realm_url = URL_REALM.format(url=self.baseurl, realm=realm) try: - return json.loads(to_native(open_url(realm_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(realm_url, method='GET') except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg='Could not obtain realm %s: %s' % (realm, str(e)), - exception=traceback.format_exc()) + self.fail_request(e, msg='Could not obtain realm %s: %s' % (realm, str(e)), + exception=traceback.format_exc()) except ValueError as e: self.module.fail_json(msg='API returned incorrect JSON when trying to obtain realm %s: %s' % (realm, str(e)), exception=traceback.format_exc()) @@ -289,11 +534,10 @@ class KeycloakAPI(object): realm_url = URL_REALM.format(url=self.baseurl, realm=realm) try: - return open_url(realm_url, method='PUT', 
http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(realmrep), validate_certs=self.validate_certs) + return self._request(realm_url, method='PUT', data=json.dumps(realmrep)) except Exception as e: - self.module.fail_json(msg='Could not update realm %s: %s' % (realm, str(e)), - exception=traceback.format_exc()) + self.fail_request(e, msg='Could not update realm %s: %s' % (realm, str(e)), + exception=traceback.format_exc()) def create_realm(self, realmrep): """ Create a realm in keycloak @@ -303,11 +547,10 @@ class KeycloakAPI(object): realm_url = URL_REALMS.format(url=self.baseurl) try: - return open_url(realm_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(realmrep), validate_certs=self.validate_certs) + return self._request(realm_url, method='POST', data=json.dumps(realmrep)) except Exception as e: - self.module.fail_json(msg='Could not create realm %s: %s' % (realmrep['id'], str(e)), - exception=traceback.format_exc()) + self.fail_request(e, msg='Could not create realm %s: %s' % (realmrep['id'], str(e)), + exception=traceback.format_exc()) def delete_realm(self, realm="master"): """ Delete a realm from Keycloak @@ -318,11 +561,10 @@ class KeycloakAPI(object): realm_url = URL_REALM.format(url=self.baseurl, realm=realm) try: - return open_url(realm_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(realm_url, method='DELETE') except Exception as e: - self.module.fail_json(msg='Could not delete realm %s: %s' % (realm, str(e)), - exception=traceback.format_exc()) + self.fail_request(e, msg='Could not delete realm %s: %s' % (realm, str(e)), + exception=traceback.format_exc()) def get_clients(self, realm='master', filter=None): """ Obtains client representations for clients in a realm @@ -336,15 +578,13 @@ class 
KeycloakAPI(object): clientlist_url += '?clientId=%s' % filter try: - return json.loads(to_native(open_url(clientlist_url, http_agent=self.http_agent, method='GET', headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(clientlist_url, method='GET') except ValueError as e: self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of clients for realm %s: %s' % (realm, str(e))) except Exception as e: - self.module.fail_json(msg='Could not obtain list of clients for realm %s: %s' - % (realm, str(e))) + self.fail_request(e, msg='Could not obtain list of clients for realm %s: %s' + % (realm, str(e))) def get_client_by_clientid(self, client_id, realm='master'): """ Get client representation by clientId @@ -368,16 +608,14 @@ class KeycloakAPI(object): client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id) try: - return json.loads(to_native(open_url(client_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(client_url, method='GET') except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg='Could not obtain client %s for realm %s: %s' - % (id, realm, str(e))) + self.fail_request(e, msg='Could not obtain client %s for realm %s: %s' + % (id, realm, str(e))) except ValueError as e: self.module.fail_json(msg='API returned incorrect JSON when trying to obtain client %s for realm %s: %s' % (id, realm, str(e))) @@ -408,11 +646,10 @@ class KeycloakAPI(object): client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id) try: - return open_url(client_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(clientrep), validate_certs=self.validate_certs) + return self._request(client_url, method='PUT', 
data=json.dumps(clientrep)) except Exception as e: - self.module.fail_json(msg='Could not update client %s in realm %s: %s' - % (id, realm, str(e))) + self.fail_request(e, msg='Could not update client %s in realm %s: %s' + % (id, realm, str(e))) def create_client(self, clientrep, realm="master"): """ Create a client in keycloak @@ -423,11 +660,10 @@ class KeycloakAPI(object): client_url = URL_CLIENTS.format(url=self.baseurl, realm=realm) try: - return open_url(client_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(clientrep), validate_certs=self.validate_certs) + return self._request(client_url, method='POST', data=json.dumps(clientrep)) except Exception as e: - self.module.fail_json(msg='Could not create client %s in realm %s: %s' - % (clientrep['clientId'], realm, str(e))) + self.fail_request(e, msg='Could not create client %s in realm %s: %s' + % (clientrep['clientId'], realm, str(e))) def delete_client(self, id, realm="master"): """ Delete a client from Keycloak @@ -439,11 +675,10 @@ class KeycloakAPI(object): client_url = URL_CLIENT.format(url=self.baseurl, realm=realm, id=id) try: - return open_url(client_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(client_url, method='DELETE') except Exception as e: - self.module.fail_json(msg='Could not delete client %s in realm %s: %s' - % (id, realm, str(e))) + self.fail_request(e, msg='Could not delete client %s in realm %s: %s' + % (id, realm, str(e))) def get_client_roles_by_id(self, cid, realm="master"): """ Fetch the roles of the a client on the Keycloak server. 
@@ -454,12 +689,10 @@ class KeycloakAPI(object): """ client_roles_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid) try: - return json.loads(to_native(open_url(client_roles_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(client_roles_url, method="GET") except Exception as e: - self.module.fail_json(msg="Could not fetch rolemappings for client %s in realm %s: %s" - % (cid, realm, str(e))) + self.fail_request(e, msg="Could not fetch rolemappings for client %s in realm %s: %s" + % (cid, realm, str(e))) def get_client_role_id_by_name(self, cid, name, realm="master"): """ Get the role ID of a client. @@ -486,19 +719,17 @@ class KeycloakAPI(object): """ rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid) try: - rolemappings = json.loads(to_native(open_url(rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + rolemappings = self._request_and_deserialize(rolemappings_url, method="GET") for role in rolemappings: if rid == role['id']: return role except Exception as e: - self.module.fail_json(msg="Could not fetch rolemappings for client %s in group %s, realm %s: %s" - % (cid, gid, realm, str(e))) + self.fail_request(e, msg="Could not fetch rolemappings for client %s in group %s, realm %s: %s" + % (cid, gid, realm, str(e))) return None def get_client_group_available_rolemappings(self, gid, cid, realm="master"): - """ Fetch the available role of a client in a specified goup on the Keycloak server. + """ Fetch the available role of a client in a specified group on the Keycloak server. :param gid: ID of the group from which to obtain the rolemappings. :param cid: ID of the client from which to obtain the rolemappings. 
@@ -507,12 +738,10 @@ class KeycloakAPI(object): """ available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=gid, client=cid) try: - return json.loads(to_native(open_url(available_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(available_rolemappings_url, method="GET") except Exception as e: - self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s" - % (cid, gid, realm, str(e))) + self.fail_request(e, msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s" + % (cid, gid, realm, str(e))) def get_client_group_composite_rolemappings(self, gid, cid, realm="master"): """ Fetch the composite role of a client in a specified group on the Keycloak server. @@ -524,12 +753,10 @@ class KeycloakAPI(object): """ composite_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=gid, client=cid) try: - return json.loads(to_native(open_url(composite_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(composite_rolemappings_url, method="GET") except Exception as e: - self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s" - % (cid, gid, realm, str(e))) + self.fail_request(e, msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s" + % (cid, gid, realm, str(e))) def get_role_by_id(self, rid, realm="master"): """ Fetch a role by its id on the Keycloak server. 
@@ -540,12 +767,10 @@ class KeycloakAPI(object): """ client_roles_url = URL_ROLES_BY_ID.format(url=self.baseurl, realm=realm, id=rid) try: - return json.loads(to_native(open_url(client_roles_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(client_roles_url, method="GET") except Exception as e: - self.module.fail_json(msg="Could not fetch role for id %s in realm %s: %s" - % (rid, realm, str(e))) + self.fail_request(e, msg="Could not fetch role for id %s in realm %s: %s" + % (rid, realm, str(e))) def get_client_roles_by_id_composite_rolemappings(self, rid, cid, realm="master"): """ Fetch a role by its id on the Keycloak server. @@ -557,12 +782,10 @@ class KeycloakAPI(object): """ client_roles_url = URL_ROLES_BY_ID_COMPOSITES_CLIENTS.format(url=self.baseurl, realm=realm, id=rid, cid=cid) try: - return json.loads(to_native(open_url(client_roles_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(client_roles_url, method="GET") except Exception as e: - self.module.fail_json(msg="Could not fetch role for id %s and cid %s in realm %s: %s" - % (rid, cid, realm, str(e))) + self.fail_request(e, msg="Could not fetch role for id %s and cid %s in realm %s: %s" + % (rid, cid, realm, str(e))) def add_client_roles_by_id_composite_rolemapping(self, rid, roles_rep, realm="master"): """ Assign roles to composite role @@ -574,14 +797,43 @@ class KeycloakAPI(object): """ available_rolemappings_url = URL_ROLES_BY_ID_COMPOSITES.format(url=self.baseurl, realm=realm, id=rid) try: - open_url(available_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(roles_rep), - validate_certs=self.validate_certs, timeout=self.connection_timeout) + 
self._request(available_rolemappings_url, method="POST", data=json.dumps(roles_rep)) except Exception as e: - self.module.fail_json(msg="Could not assign roles to composite role %s and realm %s: %s" - % (rid, realm, str(e))) + self.fail_request(e, msg="Could not assign roles to composite role %s and realm %s: %s" + % (rid, realm, str(e))) + + def add_group_realm_rolemapping(self, gid, role_rep, realm="master"): + """ Add the specified realm role to specified group on the Keycloak server. + + :param gid: ID of the group to add the role mapping. + :param role_rep: Representation of the role to assign. + :param realm: Realm from which to obtain the rolemappings. + :return: None. + """ + url = URL_REALM_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, group=gid) + try: + self._request(url, method="POST", data=json.dumps(role_rep)) + except Exception as e: + self.fail_request(e, msg="Could not add realm role mappings for group %s, realm %s: %s" + % (gid, realm, str(e))) + + def delete_group_realm_rolemapping(self, gid, role_rep, realm="master"): + """ Delete the specified realm role from the specified group on the Keycloak server. + + :param gid: ID of the group from which to obtain the rolemappings. + :param role_rep: Representation of the role to assign. + :param realm: Realm from which to obtain the rolemappings. + :return: None. + """ + url = URL_REALM_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, group=gid) + try: + self._request(url, method="DELETE", data=json.dumps(role_rep)) + except Exception as e: + self.fail_request(e, msg="Could not delete realm role mappings for group %s, realm %s: %s" + % (gid, realm, str(e))) def add_group_rolemapping(self, gid, cid, role_rep, realm="master"): - """ Fetch the composite role of a client in a specified goup on the Keycloak server. + """ Fetch the composite role of a client in a specified group on the Keycloak server. :param gid: ID of the group from which to obtain the rolemappings.
:param cid: ID of the client from which to obtain the rolemappings. @@ -591,11 +843,10 @@ class KeycloakAPI(object): """ available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid) try: - open_url(available_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep), - validate_certs=self.validate_certs, timeout=self.connection_timeout) + self._request(available_rolemappings_url, method="POST", data=json.dumps(role_rep)) except Exception as e: - self.module.fail_json(msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s" - % (cid, gid, realm, str(e))) + self.fail_request(e, msg="Could not fetch available rolemappings for client %s in group %s, realm %s: %s" + % (cid, gid, realm, str(e))) def delete_group_rolemapping(self, gid, cid, role_rep, realm="master"): """ Delete the rolemapping of a client in a specified group on the Keycloak server. @@ -608,11 +859,10 @@ class KeycloakAPI(object): """ available_rolemappings_url = URL_CLIENT_GROUP_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=gid, client=cid) try: - open_url(available_rolemappings_url, method="DELETE", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep), - validate_certs=self.validate_certs, timeout=self.connection_timeout) + self._request(available_rolemappings_url, method="DELETE", data=json.dumps(role_rep)) except Exception as e: - self.module.fail_json(msg="Could not delete available rolemappings for client %s in group %s, realm %s: %s" - % (cid, gid, realm, str(e))) + self.fail_request(e, msg="Could not delete available rolemappings for client %s in group %s, realm %s: %s" + % (cid, gid, realm, str(e))) def get_client_user_rolemapping_by_id(self, uid, cid, rid, realm='master'): """ Obtain client representation by id @@ -625,15 +875,13 @@ class KeycloakAPI(object): """ rolemappings_url = 
URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid) try: - rolemappings = json.loads(to_native(open_url(rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + rolemappings = self._request_and_deserialize(rolemappings_url, method="GET") for role in rolemappings: if rid == role['id']: return role except Exception as e: - self.module.fail_json(msg="Could not fetch rolemappings for client %s and user %s, realm %s: %s" - % (cid, uid, realm, str(e))) + self.fail_request(e, msg="Could not fetch rolemappings for client %s and user %s, realm %s: %s" + % (cid, uid, realm, str(e))) return None def get_client_user_available_rolemappings(self, uid, cid, realm="master"): @@ -646,12 +894,10 @@ class KeycloakAPI(object): """ available_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=uid, client=cid) try: - return json.loads(to_native(open_url(available_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(available_rolemappings_url, method="GET") except Exception as e: - self.module.fail_json(msg="Could not fetch effective rolemappings for client %s and user %s, realm %s: %s" - % (cid, uid, realm, str(e))) + self.fail_request(e, msg="Could not fetch effective rolemappings for client %s and user %s, realm %s: %s" + % (cid, uid, realm, str(e))) def get_client_user_composite_rolemappings(self, uid, cid, realm="master"): """ Fetch the composite role of a client for a specified user on the Keycloak server. 
@@ -663,12 +909,10 @@ class KeycloakAPI(object): """ composite_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=uid, client=cid) try: - return json.loads(to_native(open_url(composite_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(composite_rolemappings_url, method="GET") except Exception as e: - self.module.fail_json(msg="Could not fetch available rolemappings for user %s of realm %s: %s" - % (uid, realm, str(e))) + self.fail_request(e, msg="Could not fetch available rolemappings for user %s of realm %s: %s" + % (uid, realm, str(e))) def get_realm_user_rolemapping_by_id(self, uid, rid, realm='master'): """ Obtain role representation by id @@ -680,15 +924,13 @@ class KeycloakAPI(object): """ rolemappings_url = URL_REALM_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid) try: - rolemappings = json.loads(to_native(open_url(rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + rolemappings = self._request_and_deserialize(rolemappings_url, method="GET") for role in rolemappings: if rid == role['id']: return role except Exception as e: - self.module.fail_json(msg="Could not fetch rolemappings for user %s, realm %s: %s" - % (uid, realm, str(e))) + self.fail_request(e, msg="Could not fetch rolemappings for user %s, realm %s: %s" + % (uid, realm, str(e))) return None def get_realm_user_available_rolemappings(self, uid, realm="master"): @@ -700,12 +942,10 @@ class KeycloakAPI(object): """ available_rolemappings_url = URL_REALM_ROLEMAPPINGS_AVAILABLE.format(url=self.baseurl, realm=realm, id=uid) try: - return json.loads(to_native(open_url(available_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, - 
timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(available_rolemappings_url, method="GET") except Exception as e: - self.module.fail_json(msg="Could not fetch available rolemappings for user %s of realm %s: %s" - % (uid, realm, str(e))) + self.fail_request(e, msg="Could not fetch available rolemappings for user %s of realm %s: %s" + % (uid, realm, str(e))) def get_realm_user_composite_rolemappings(self, uid, realm="master"): """ Fetch the composite role of a realm for a specified user on the Keycloak server. @@ -716,12 +956,10 @@ class KeycloakAPI(object): """ composite_rolemappings_url = URL_REALM_ROLEMAPPINGS_COMPOSITE.format(url=self.baseurl, realm=realm, id=uid) try: - return json.loads(to_native(open_url(composite_rolemappings_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(composite_rolemappings_url, method="GET") except Exception as e: - self.module.fail_json(msg="Could not fetch effective rolemappings for user %s, realm %s: %s" - % (uid, realm, str(e))) + self.fail_request(e, msg="Could not fetch effective rolemappings for user %s, realm %s: %s" + % (uid, realm, str(e))) def get_user_by_username(self, username, realm="master"): """ Fetch a keycloak user within a realm based on its username. 
@@ -733,14 +971,20 @@ class KeycloakAPI(object): users_url = URL_USERS.format(url=self.baseurl, realm=realm) users_url += '?username=%s&exact=true' % username try: - return json.loads(to_native(open_url(users_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + userrep = None + users = self._request_and_deserialize(users_url, method='GET') + for user in users: + if user['username'] == username: + userrep = user + break + return userrep + except ValueError as e: self.module.fail_json(msg='API returned incorrect JSON when trying to obtain the user for realm %s and username %s: %s' % (realm, username, str(e))) except Exception as e: - self.module.fail_json(msg='Could not obtain the user for realm %s and username %s: %s' - % (realm, username, str(e))) + self.fail_request(e, msg='Could not obtain the user for realm %s and username %s: %s' + % (realm, username, str(e))) def get_service_account_user_by_client_id(self, client_id, realm="master"): """ Fetch a keycloak service account user within a realm based on its client_id. 
@@ -753,14 +997,13 @@ class KeycloakAPI(object): service_account_user_url = URL_CLIENT_SERVICE_ACCOUNT_USER.format(url=self.baseurl, realm=realm, id=cid) try: - return json.loads(to_native(open_url(service_account_user_url, method='GET', headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(service_account_user_url, method='GET') except ValueError as e: self.module.fail_json(msg='API returned incorrect JSON when trying to obtain the service-account-user for realm %s and client_id %s: %s' % (realm, client_id, str(e))) except Exception as e: - self.module.fail_json(msg='Could not obtain the service-account-user for realm %s and client_id %s: %s' - % (realm, client_id, str(e))) + self.fail_request(e, msg='Could not obtain the service-account-user for realm %s and client_id %s: %s' + % (realm, client_id, str(e))) def add_user_rolemapping(self, uid, cid, role_rep, realm="master"): """ Assign a realm or client role to a specified user on the Keycloak server. 
@@ -774,19 +1017,17 @@ class KeycloakAPI(object): """ if cid is None: user_realm_rolemappings_url = URL_REALM_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid) try: - open_url(user_realm_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep), - validate_certs=self.validate_certs, timeout=self.connection_timeout) + self._request(user_realm_rolemappings_url, method="POST", data=json.dumps(role_rep)) except Exception as e: - self.module.fail_json(msg="Could not map roles to userId %s for realm %s and roles %s: %s" - % (uid, realm, json.dumps(role_rep), str(e))) + self.fail_request(e, msg="Could not map roles to userId %s for realm %s and roles %s: %s" + % (uid, realm, json.dumps(role_rep), str(e))) else: user_client_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid) try: - open_url(user_client_rolemappings_url, method="POST", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep), - validate_certs=self.validate_certs, timeout=self.connection_timeout) + self._request(user_client_rolemappings_url, method="POST", data=json.dumps(role_rep)) except Exception as e: - self.module.fail_json(msg="Could not map roles to userId %s for client %s, realm %s and roles %s: %s" - % (cid, uid, realm, json.dumps(role_rep), str(e))) + self.fail_request(e, msg="Could not map roles to userId %s for client %s, realm %s and roles %s: %s" + % (uid, cid, realm, json.dumps(role_rep), str(e))) def delete_user_rolemapping(self, uid, cid, role_rep, realm="master"): """ Delete the rolemapping of a client in a specified user on the Keycloak server.
@@ -800,19 +1041,17 @@ class KeycloakAPI(object): if cid is None: user_realm_rolemappings_url = URL_REALM_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid) try: - open_url(user_realm_rolemappings_url, method="DELETE", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep), - validate_certs=self.validate_certs, timeout=self.connection_timeout) + self._request(user_realm_rolemappings_url, method="DELETE", data=json.dumps(role_rep)) except Exception as e: - self.module.fail_json(msg="Could not remove roles %s from userId %s, realm %s: %s" - % (json.dumps(role_rep), uid, realm, str(e))) + self.fail_request(e, msg="Could not remove roles %s from userId %s, realm %s: %s" + % (json.dumps(role_rep), uid, realm, str(e))) else: user_client_rolemappings_url = URL_CLIENT_USER_ROLEMAPPINGS.format(url=self.baseurl, realm=realm, id=uid, client=cid) try: - open_url(user_client_rolemappings_url, method="DELETE", http_agent=self.http_agent, headers=self.restheaders, data=json.dumps(role_rep), - validate_certs=self.validate_certs, timeout=self.connection_timeout) + self._request(user_client_rolemappings_url, method="DELETE", data=json.dumps(role_rep)) except Exception as e: - self.module.fail_json(msg="Could not remove roles %s for client %s from userId %s, realm %s: %s" - % (json.dumps(role_rep), cid, uid, realm, str(e))) + self.fail_request(e, msg="Could not remove roles %s for client %s from userId %s, realm %s: %s" + % (json.dumps(role_rep), cid, uid, realm, str(e))) def get_client_templates(self, realm='master'): """ Obtains client template representations for client templates in a realm @@ -823,14 +1062,13 @@ class KeycloakAPI(object): url = URL_CLIENTTEMPLATES.format(url=self.baseurl, realm=realm) try: - return json.loads(to_native(open_url(url, method='GET', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(url, 
method='GET') except ValueError as e: self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of client templates for realm %s: %s' % (realm, str(e))) except Exception as e: - self.module.fail_json(msg='Could not obtain list of client templates for realm %s: %s' - % (realm, str(e))) + self.fail_request(e, msg='Could not obtain list of client templates for realm %s: %s' + % (realm, str(e))) def get_client_template_by_id(self, id, realm='master'): """ Obtain client template representation by id @@ -842,14 +1080,13 @@ class KeycloakAPI(object): url = URL_CLIENTTEMPLATE.format(url=self.baseurl, id=id, realm=realm) try: - return json.loads(to_native(open_url(url, method='GET', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(url, method='GET') except ValueError as e: self.module.fail_json(msg='API returned incorrect JSON when trying to obtain client templates %s for realm %s: %s' % (id, realm, str(e))) except Exception as e: - self.module.fail_json(msg='Could not obtain client template %s for realm %s: %s' - % (id, realm, str(e))) + self.fail_request(e, msg='Could not obtain client template %s for realm %s: %s' + % (id, realm, str(e))) def get_client_template_by_name(self, name, realm='master'): """ Obtain client template representation by name @@ -888,11 +1125,10 @@ class KeycloakAPI(object): url = URL_CLIENTTEMPLATE.format(url=self.baseurl, realm=realm, id=id) try: - return open_url(url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(clienttrep), validate_certs=self.validate_certs) + return self._request(url, method='PUT', data=json.dumps(clienttrep)) except Exception as e: - self.module.fail_json(msg='Could not update client template %s in realm %s: %s' - % (id, realm, str(e))) + self.fail_request(e, msg='Could not update client template %s in realm 
%s: %s' + % (id, realm, str(e))) def create_client_template(self, clienttrep, realm="master"): """ Create a client in keycloak @@ -903,11 +1139,10 @@ class KeycloakAPI(object): url = URL_CLIENTTEMPLATES.format(url=self.baseurl, realm=realm) try: - return open_url(url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(clienttrep), validate_certs=self.validate_certs) + return self._request(url, method='POST', data=json.dumps(clienttrep)) except Exception as e: - self.module.fail_json(msg='Could not create client template %s in realm %s: %s' - % (clienttrep['clientId'], realm, str(e))) + self.fail_request(e, msg='Could not create client template %s in realm %s: %s' + % (clienttrep['clientId'], realm, str(e))) def delete_client_template(self, id, realm="master"): """ Delete a client template from Keycloak @@ -919,11 +1154,10 @@ class KeycloakAPI(object): url = URL_CLIENTTEMPLATE.format(url=self.baseurl, realm=realm, id=id) try: - return open_url(url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(url, method='DELETE') except Exception as e: - self.module.fail_json(msg='Could not delete client template %s in realm %s: %s' - % (id, realm, str(e))) + self.fail_request(e, msg='Could not delete client template %s in realm %s: %s' + % (id, realm, str(e))) def get_clientscopes(self, realm="master"): """ Fetch the name and ID of all clientscopes on the Keycloak server. 
@@ -936,12 +1170,10 @@ class KeycloakAPI(object): """ clientscopes_url = URL_CLIENTSCOPES.format(url=self.baseurl, realm=realm) try: - return json.loads(to_native(open_url(clientscopes_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(clientscopes_url, method="GET") except Exception as e: - self.module.fail_json(msg="Could not fetch list of clientscopes in realm %s: %s" - % (realm, str(e))) + self.fail_request(e, msg="Could not fetch list of clientscopes in realm %s: %s" + % (realm, str(e))) def get_clientscope_by_clientscopeid(self, cid, realm="master"): """ Fetch a keycloak clientscope from the provided realm using the clientscope's unique ID. @@ -954,16 +1186,14 @@ class KeycloakAPI(object): """ clientscope_url = URL_CLIENTSCOPE.format(url=self.baseurl, realm=realm, id=cid) try: - return json.loads(to_native(open_url(clientscope_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(clientscope_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg="Could not fetch clientscope %s in realm %s: %s" - % (cid, realm, str(e))) + self.fail_request(e, msg="Could not fetch clientscope %s in realm %s: %s" + % (cid, realm, str(e))) except Exception as e: self.module.fail_json(msg="Could not clientscope group %s in realm %s: %s" % (cid, realm, str(e))) @@ -1000,11 +1230,10 @@ class KeycloakAPI(object): """ clientscopes_url = URL_CLIENTSCOPES.format(url=self.baseurl, realm=realm) try: - return open_url(clientscopes_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(clientscoperep), validate_certs=self.validate_certs) + return self._request(clientscopes_url, method='POST', 
data=json.dumps(clientscoperep)) except Exception as e: - self.module.fail_json(msg="Could not create clientscope %s in realm %s: %s" - % (clientscoperep['name'], realm, str(e))) + self.fail_request(e, msg="Could not create clientscope %s in realm %s: %s" + % (clientscoperep['name'], realm, str(e))) def update_clientscope(self, clientscoperep, realm="master"): """ Update an existing clientscope. @@ -1015,12 +1244,11 @@ class KeycloakAPI(object): clientscope_url = URL_CLIENTSCOPE.format(url=self.baseurl, realm=realm, id=clientscoperep['id']) try: - return open_url(clientscope_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(clientscoperep), validate_certs=self.validate_certs) + return self._request(clientscope_url, method='PUT', data=json.dumps(clientscoperep)) except Exception as e: - self.module.fail_json(msg='Could not update clientscope %s in realm %s: %s' - % (clientscoperep['name'], realm, str(e))) + self.fail_request(e, msg='Could not update clientscope %s in realm %s: %s' + % (clientscoperep['name'], realm, str(e))) def delete_clientscope(self, name=None, cid=None, realm="master"): """ Delete a clientscope. One of name or cid must be provided. @@ -1037,8 +1265,8 @@ class KeycloakAPI(object): # prefer an exception since this is almost certainly a programming error in the module itself. raise Exception("Unable to delete group - one of group ID or name must be provided.") - # only lookup the name if cid isn't provided. - # in the case that both are provided, prefer the ID, since it's one + # only lookup the name if cid is not provided. + # in the case that both are provided, prefer the ID, since it is one # less lookup. if cid is None and name is not None: for clientscope in self.get_clientscopes(realm=realm): @@ -1053,11 +1281,10 @@ class KeycloakAPI(object): # should have a good cid by here. 
clientscope_url = URL_CLIENTSCOPE.format(realm=realm, id=cid, url=self.baseurl) try: - return open_url(clientscope_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(clientscope_url, method='DELETE') except Exception as e: - self.module.fail_json(msg="Unable to delete clientscope %s: %s" % (cid, str(e))) + self.fail_request(e, msg="Unable to delete clientscope %s: %s" % (cid, str(e))) def get_clientscope_protocolmappers(self, cid, realm="master"): """ Fetch the name and ID of all clientscopes on the Keycloak server. @@ -1071,12 +1298,10 @@ class KeycloakAPI(object): """ protocolmappers_url = URL_CLIENTSCOPE_PROTOCOLMAPPERS.format(id=cid, url=self.baseurl, realm=realm) try: - return json.loads(to_native(open_url(protocolmappers_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(protocolmappers_url, method="GET") except Exception as e: - self.module.fail_json(msg="Could not fetch list of protocolmappers in realm %s: %s" - % (realm, str(e))) + self.fail_request(e, msg="Could not fetch list of protocolmappers in realm %s: %s" + % (realm, str(e))) def get_clientscope_protocolmapper_by_protocolmapperid(self, pid, cid, realm="master"): """ Fetch a keycloak clientscope from the provided realm using the clientscope's unique ID. 
@@ -1091,16 +1316,14 @@ class KeycloakAPI(object): """ protocolmapper_url = URL_CLIENTSCOPE_PROTOCOLMAPPER.format(url=self.baseurl, realm=realm, id=cid, mapper_id=pid) try: - return json.loads(to_native(open_url(protocolmapper_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(protocolmapper_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg="Could not fetch protocolmapper %s in realm %s: %s" - % (pid, realm, str(e))) + self.fail_request(e, msg="Could not fetch protocolmapper %s in realm %s: %s" + % (pid, realm, str(e))) except Exception as e: self.module.fail_json(msg="Could not fetch protocolmapper %s in realm %s: %s" % (cid, realm, str(e))) @@ -1139,11 +1362,10 @@ class KeycloakAPI(object): """ protocolmappers_url = URL_CLIENTSCOPE_PROTOCOLMAPPERS.format(url=self.baseurl, id=cid, realm=realm) try: - return open_url(protocolmappers_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(mapper_rep), validate_certs=self.validate_certs) + return self._request(protocolmappers_url, method='POST', data=json.dumps(mapper_rep)) except Exception as e: - self.module.fail_json(msg="Could not create protocolmapper %s in realm %s: %s" - % (mapper_rep['name'], realm, str(e))) + self.fail_request(e, msg="Could not create protocolmapper %s in realm %s: %s" + % (mapper_rep['name'], realm, str(e))) def update_clientscope_protocolmappers(self, cid, mapper_rep, realm="master"): """ Update an existing clientscope. 
@@ -1155,12 +1377,133 @@ class KeycloakAPI(object): protocolmapper_url = URL_CLIENTSCOPE_PROTOCOLMAPPER.format(url=self.baseurl, realm=realm, id=cid, mapper_id=mapper_rep['id']) try: - return open_url(protocolmapper_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(mapper_rep), validate_certs=self.validate_certs) + return self._request(protocolmapper_url, method='PUT', data=json.dumps(mapper_rep)) except Exception as e: - self.module.fail_json(msg='Could not update protocolmappers for clientscope %s in realm %s: %s' - % (mapper_rep, realm, str(e))) + self.fail_request(e, msg='Could not update protocolmappers for clientscope %s in realm %s: %s' + % (mapper_rep, realm, str(e))) + + def get_default_clientscopes(self, realm, client_id=None): + """Fetch the name and ID of all clientscopes on the Keycloak server. + + To fetch the full data of the client scope, make a subsequent call to + get_clientscope_by_clientscopeid, passing in the ID of the client scope you wish to return. + + :param realm: Realm in which the clientscope resides. + :param client_id: The client in which the clientscope resides. + :return The default clientscopes of this realm or client + """ + url = URL_DEFAULT_CLIENTSCOPES if client_id is None else URL_CLIENT_DEFAULT_CLIENTSCOPES + return self._get_clientscopes_of_type(realm, url, 'default', client_id) + + def get_optional_clientscopes(self, realm, client_id=None): + """Fetch the name and ID of all clientscopes on the Keycloak server. + + To fetch the full data of the client scope, make a subsequent call to + get_clientscope_by_clientscopeid, passing in the ID of the client scope you wish to return. + + :param realm: Realm in which the clientscope resides. + :param client_id: The client in which the clientscope resides. 
+ :return The optional clientscopes of this realm or client + """ + url = URL_OPTIONAL_CLIENTSCOPES if client_id is None else URL_CLIENT_OPTIONAL_CLIENTSCOPES + return self._get_clientscopes_of_type(realm, url, 'optional', client_id) + + def _get_clientscopes_of_type(self, realm, url_template, scope_type, client_id=None): + """Fetch the name and ID of all clientscopes on the Keycloak server. + + To fetch the full data of the client scope, make a subsequent call to + get_clientscope_by_clientscopeid, passing in the ID of the client scope you wish to return. + + :param realm: Realm in which the clientscope resides. + :param url_template the template for the right type + :param scope_type this can be either optional or default + :param client_id: The client in which the clientscope resides. + :return The clientscopes of the specified type of this realm + """ + if client_id is None: + clientscopes_url = url_template.format(url=self.baseurl, realm=realm) + try: + return self._request_and_deserialize(clientscopes_url, method="GET") + except Exception as e: + self.fail_request(e, msg="Could not fetch list of %s clientscopes in realm %s: %s" % (scope_type, realm, str(e))) + else: + cid = self.get_client_id(client_id=client_id, realm=realm) + clientscopes_url = url_template.format(url=self.baseurl, realm=realm, cid=cid) + try: + return self._request_and_deserialize(clientscopes_url, method="GET") + except Exception as e: + self.fail_request(e, msg="Could not fetch list of %s clientscopes in client %s: %s" % (scope_type, client_id, clientscopes_url)) + + def _decide_url_type_clientscope(self, client_id=None, scope_type="default"): + """Decides which url to use. + :param scope_type this can be either optional or default + :param client_id: The client in which the clientscope resides. 
+ """ + if client_id is None: + if scope_type == "default": + return URL_DEFAULT_CLIENTSCOPE + if scope_type == "optional": + return URL_OPTIONAL_CLIENTSCOPE + else: + if scope_type == "default": + return URL_CLIENT_DEFAULT_CLIENTSCOPE + if scope_type == "optional": + return URL_CLIENT_OPTIONAL_CLIENTSCOPE + + def add_default_clientscope(self, id, realm="master", client_id=None): + """Add a client scope as default either on realm or client level. + + :param id: Client scope Id. + :param realm: Realm in which the clientscope resides. + :param client_id: The client in which the clientscope resides. + """ + self._action_type_clientscope(id, client_id, "default", realm, 'add') + + def add_optional_clientscope(self, id, realm="master", client_id=None): + """Add a client scope as optional either on realm or client level. + + :param id: Client scope Id. + :param realm: Realm in which the clientscope resides. + :param client_id: The client in which the clientscope resides. + """ + self._action_type_clientscope(id, client_id, "optional", realm, 'add') + + def delete_default_clientscope(self, id, realm="master", client_id=None): + """Remove a client scope as default either on realm or client level. + + :param id: Client scope Id. + :param realm: Realm in which the clientscope resides. + :param client_id: The client in which the clientscope resides. + """ + self._action_type_clientscope(id, client_id, "default", realm, 'delete') + + def delete_optional_clientscope(self, id, realm="master", client_id=None): + """Remove a client scope as optional either on realm or client level. + + :param id: Client scope Id. + :param realm: Realm in which the clientscope resides. + :param client_id: The client in which the clientscope resides. + """ + self._action_type_clientscope(id, client_id, "optional", realm, 'delete') + + def _action_type_clientscope(self, id=None, client_id=None, scope_type="default", realm="master", action='add'): + """ Delete or add a clientscope of type. 
+        :param id: Client scope Id to add or remove.
+        :param client_id: The client in which the clientscope resides; realm level when None.
+        :param scope_type 'default' or 'optional'
+        :param realm: The realm in which this group resides, default "master".
+        """
+        cid = None if client_id is None else self.get_client_id(client_id=client_id, realm=realm)
+        # should have a good cid by here.
+        clientscope_type_url = self._decide_url_type_clientscope(client_id, scope_type).format(realm=realm, id=id, cid=cid, url=self.baseurl)
+        try:
+            method = 'PUT' if action == "add" else 'DELETE'
+            return self._request(clientscope_type_url, method=method)
+
+        except Exception as e:
+            place = 'realm' if client_id is None else 'client ' + client_id
+            self.fail_request(e, msg="Unable to %s %s clientscope %s @ %s : %s" % (action, scope_type, id, place, str(e)))
 
     def create_clientsecret(self, id, realm="master"):
         """ Generate a new client secret by id
@@ -1172,15 +1515,14 @@ class KeycloakAPI(object):
         clientsecret_url = URL_CLIENTSECRET.format(url=self.baseurl, realm=realm, id=id)
 
         try:
-            return json.loads(to_native(open_url(clientsecret_url, method='POST', headers=self.restheaders, timeout=self.connection_timeout,
-                                                 validate_certs=self.validate_certs).read()))
+            return self._request_and_deserialize(clientsecret_url, method='POST')
         except HTTPError as e:
             if e.code == 404:
                 return None
             else:
-                self.module.fail_json(msg='Could not obtain clientsecret of client %s for realm %s: %s'
-                                      % (id, realm, str(e)))
+                self.fail_request(e, msg='Could not obtain clientsecret of client %s for realm %s: %s'
+                                  % (id, realm, str(e)))
         except Exception as e:
             self.module.fail_json(msg='Could not obtain clientsecret of client %s for realm %s: %s'
                                   % (id, realm, str(e)))
@@ -1195,15 +1537,14 @@ class KeycloakAPI(object):
         clientsecret_url = URL_CLIENTSECRET.format(url=self.baseurl, realm=realm, id=id)
 
         try:
-            return json.loads(to_native(open_url(clientsecret_url, method='GET', 
headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(clientsecret_url, method='GET') except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg='Could not obtain clientsecret of client %s for realm %s: %s' - % (id, realm, str(e))) + self.fail_request(e, msg='Could not obtain clientsecret of client %s for realm %s: %s' + % (id, realm, str(e))) except Exception as e: self.module.fail_json(msg='Could not obtain clientsecret of client %s for realm %s: %s' % (id, realm, str(e))) @@ -1218,12 +1559,10 @@ class KeycloakAPI(object): """ groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm) try: - return json.loads(to_native(open_url(groups_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(groups_url, method="GET") except Exception as e: - self.module.fail_json(msg="Could not fetch list of groups in realm %s: %s" - % (realm, str(e))) + self.fail_request(e, msg="Could not fetch list of groups in realm %s: %s" + % (realm, str(e))) def get_group_by_groupid(self, gid, realm="master"): """ Fetch a keycloak group from the provided realm using the group's unique ID. 
@@ -1236,20 +1575,33 @@ class KeycloakAPI(object): """ groups_url = URL_GROUP.format(url=self.baseurl, realm=realm, groupid=gid) try: - return json.loads(to_native(open_url(groups_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(groups_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg="Could not fetch group %s in realm %s: %s" - % (gid, realm, str(e))) + self.fail_request(e, msg="Could not fetch group %s in realm %s: %s" + % (gid, realm, str(e))) except Exception as e: self.module.fail_json(msg="Could not fetch group %s in realm %s: %s" % (gid, realm, str(e))) - def get_group_by_name(self, name, realm="master"): + def get_subgroups(self, parent, realm="master"): + if 'subGroupCount' in parent: + # Since version 23, when GETting a group Keycloak does not + # return subGroups but only a subGroupCount. + # Children must be fetched in a second request. + if parent['subGroupCount'] == 0: + group_children = [] + else: + group_children_url = URL_GROUP_CHILDREN.format(url=self.baseurl, realm=realm, groupid=parent['id']) + "?max=" + str(parent['subGroupCount']) + group_children = self._request_and_deserialize(group_children_url, method="GET") + subgroups = group_children + else: + subgroups = parent['subGroups'] + return subgroups + + def get_group_by_name(self, name, realm="master", parents=None): """ Fetch a keycloak group within a realm based on its name. The Keycloak API does not allow filtering of the Groups resource by name. @@ -1259,10 +1611,18 @@ class KeycloakAPI(object): If the group does not exist, None is returned. :param name: Name of the group to fetch. 
:param realm: Realm in which the group resides; default 'master' + :param parents: Optional list of parents when group to look for is a subgroup """ - groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm) try: - all_groups = self.get_groups(realm=realm) + if parents: + parent = self.get_subgroup_direct_parent(parents, realm) + + if not parent: + return None + + all_groups = self.get_subgroups(parent, realm) + else: + all_groups = self.get_groups(realm=realm) for group in all_groups: if group['name'] == name: @@ -1274,6 +1634,102 @@ class KeycloakAPI(object): self.module.fail_json(msg="Could not fetch group %s in realm %s: %s" % (name, realm, str(e))) + def _get_normed_group_parent(self, parent): + """ Converts parent dict information into a more easy to use form. + + :param parent: parent describing dict + """ + if parent['id']: + return (parent['id'], True) + + return (parent['name'], False) + + def get_subgroup_by_chain(self, name_chain, realm="master"): + """ Access a subgroup API object by walking down a given name/id chain. + + Groups can be given either as by name or by ID, the first element + must either be a toplvl group or given as ID, all parents must exist. + + If the group cannot be found, None is returned. 
+ :param name_chain: Topdown ordered list of subgroup parent (ids or names) + its own name at the end + :param realm: Realm in which the group resides; default 'master' + """ + cp = name_chain[0] + + # for 1st parent in chain we must query the server + cp, is_id = self._get_normed_group_parent(cp) + + if is_id: + tmp = self.get_group_by_groupid(cp, realm=realm) + else: + # given as name, assume toplvl group + tmp = self.get_group_by_name(cp, realm=realm) + + if not tmp: + return None + + for p in name_chain[1:]: + for sg in self.get_subgroups(tmp, realm): + pv, is_id = self._get_normed_group_parent(p) + + if is_id: + cmpkey = "id" + else: + cmpkey = "name" + + if pv == sg[cmpkey]: + tmp = sg + break + + if not tmp: + return None + + return tmp + + def get_subgroup_direct_parent(self, parents, realm="master", children_to_resolve=None): + """ Get keycloak direct parent group API object for a given chain of parents. + + To successfully work the API for subgroups we actually don't need + to "walk the whole tree" for nested groups but only need to know + the ID for the direct predecessor of current subgroup. This + method will guarantee us this information getting there with + as minimal work as possible. + + Note that given parent list can and might be incomplete at the + upper levels as long as it starts with an ID instead of a name + + If the group does not exist, None is returned. 
+ :param parents: Topdown ordered list of subgroup parents + :param realm: Realm in which the group resides; default 'master' + """ + if children_to_resolve is None: + # start recursion by reversing parents (in optimal cases + # we dont need to walk the whole tree upwarts) + parents = list(reversed(parents)) + children_to_resolve = [] + + if not parents: + # walk complete parents list to the top, all names, no id's, + # try to resolve it assuming list is complete and 1st + # element is a toplvl group + return self.get_subgroup_by_chain(list(reversed(children_to_resolve)), realm=realm) + + cp = parents[0] + unused, is_id = self._get_normed_group_parent(cp) + + if is_id: + # current parent is given as ID, we can stop walking + # upwards searching for an entry point + return self.get_subgroup_by_chain([cp] + list(reversed(children_to_resolve)), realm=realm) + else: + # current parent is given as name, it must be resolved + # later, try next parent (recurse) + children_to_resolve.append(cp) + return self.get_subgroup_direct_parent( + parents[1:], + realm=realm, children_to_resolve=children_to_resolve + ) + def create_group(self, grouprep, realm="master"): """ Create a Keycloak group. @@ -1282,11 +1738,37 @@ class KeycloakAPI(object): """ groups_url = URL_GROUPS.format(url=self.baseurl, realm=realm) try: - return open_url(groups_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(grouprep), validate_certs=self.validate_certs) + return self._request(groups_url, method='POST', data=json.dumps(grouprep)) except Exception as e: - self.module.fail_json(msg="Could not create group %s in realm %s: %s" - % (grouprep['name'], realm, str(e))) + self.fail_request(e, msg="Could not create group %s in realm %s: %s" + % (grouprep['name'], realm, str(e))) + + def create_subgroup(self, parents, grouprep, realm="master"): + """ Create a Keycloak subgroup. 
+ + :param parents: list of one or more parent groups + :param grouprep: a GroupRepresentation of the group to be created. Must contain at minimum the field name. + :return: HTTPResponse object on success + """ + parent_id = "---UNDETERMINED---" + try: + parent_id = self.get_subgroup_direct_parent(parents, realm) + + if not parent_id: + raise Exception( + "Could not determine subgroup parent ID for given" + " parent chain {0}. Assure that all parents exist" + " already and the list is complete and properly" + " ordered, starts with an ID or starts at the" + " top level".format(parents) + ) + + parent_id = parent_id["id"] + url = URL_GROUP_CHILDREN.format(url=self.baseurl, realm=realm, groupid=parent_id) + return self._request(url, method='POST', data=json.dumps(grouprep)) + except Exception as e: + self.fail_request(e, msg="Could not create subgroup %s for parent group %s in realm %s: %s" + % (grouprep['name'], parent_id, realm, str(e))) def update_group(self, grouprep, realm="master"): """ Update an existing group. @@ -1297,11 +1779,10 @@ class KeycloakAPI(object): group_url = URL_GROUP.format(url=self.baseurl, realm=realm, groupid=grouprep['id']) try: - return open_url(group_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(grouprep), validate_certs=self.validate_certs) + return self._request(group_url, method='PUT', data=json.dumps(grouprep)) except Exception as e: - self.module.fail_json(msg='Could not update group %s in realm %s: %s' - % (grouprep['name'], realm, str(e))) + self.fail_request(e, msg='Could not update group %s in realm %s: %s' + % (grouprep['name'], realm, str(e))) def delete_group(self, name=None, groupid=None, realm="master"): """ Delete a group. One of name or groupid must be provided. @@ -1319,7 +1800,7 @@ class KeycloakAPI(object): raise Exception("Unable to delete group - one of group ID or name must be provided.") # only lookup the name if groupid isn't provided. 
- # in the case that both are provided, prefer the ID, since it's one + # in the case that both are provided, prefer the ID, since it is one # less lookup. if groupid is None and name is not None: for group in self.get_groups(realm=realm): @@ -1334,10 +1815,9 @@ class KeycloakAPI(object): # should have a good groupid by here. group_url = URL_GROUP.format(realm=realm, groupid=groupid, url=self.baseurl) try: - return open_url(group_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(group_url, method='DELETE') except Exception as e: - self.module.fail_json(msg="Unable to delete group %s: %s" % (groupid, str(e))) + self.fail_request(e, msg="Unable to delete group %s: %s" % (groupid, str(e))) def get_realm_roles(self, realm='master'): """ Obtains role representations for roles in a realm @@ -1347,15 +1827,13 @@ class KeycloakAPI(object): """ rolelist_url = URL_REALM_ROLES.format(url=self.baseurl, realm=realm) try: - return json.loads(to_native(open_url(rolelist_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(rolelist_url, method='GET') except ValueError as e: self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of roles for realm %s: %s' % (realm, str(e))) except Exception as e: - self.module.fail_json(msg='Could not obtain list of roles for realm %s: %s' - % (realm, str(e))) + self.fail_request(e, msg='Could not obtain list of roles for realm %s: %s' + % (realm, str(e))) def get_realm_role(self, name, realm='master'): """ Fetch a keycloak role from the provided realm using the role's name. @@ -1364,16 +1842,15 @@ class KeycloakAPI(object): :param name: Name of the role to fetch. :param realm: Realm in which the role resides; default 'master'. 
""" - role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name)) + role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name, safe='')) try: - return json.loads(to_native(open_url(role_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(role_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg='Could not fetch role %s in realm %s: %s' - % (name, realm, str(e))) + self.fail_request(e, msg='Could not fetch role %s in realm %s: %s' + % (name, realm, str(e))) except Exception as e: self.module.fail_json(msg='Could not fetch role %s in realm %s: %s' % (name, realm, str(e))) @@ -1386,11 +1863,13 @@ class KeycloakAPI(object): """ roles_url = URL_REALM_ROLES.format(url=self.baseurl, realm=realm) try: - return open_url(roles_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(rolerep), validate_certs=self.validate_certs) + if "composites" in rolerep: + keycloak_compatible_composites = self.convert_role_composites(rolerep["composites"]) + rolerep["composites"] = keycloak_compatible_composites + return self._request(roles_url, method='POST', data=json.dumps(rolerep)) except Exception as e: - self.module.fail_json(msg='Could not create role %s in realm %s: %s' - % (rolerep['name'], realm, str(e))) + self.fail_request(e, msg='Could not create role %s in realm %s: %s' + % (rolerep['name'], realm, str(e))) def update_realm_role(self, rolerep, realm='master'): """ Update an existing realm role. @@ -1398,13 +1877,116 @@ class KeycloakAPI(object): :param rolerep: A RoleRepresentation of the updated role. 
:return HTTPResponse object on success
         """
-        role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(rolerep['name']))
+        role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(rolerep['name'], safe=''))
         try:
-            return open_url(role_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout,
-                            data=json.dumps(rolerep), validate_certs=self.validate_certs)
+            composites = None
+            if "composites" in rolerep:
+                composites = copy.deepcopy(rolerep["composites"])
+                del rolerep["composites"]
+            role_response = self._request(role_url, method='PUT', data=json.dumps(rolerep))
+            if composites is not None:
+                self.update_role_composites(rolerep=rolerep, composites=composites, realm=realm)
+            return role_response
         except Exception as e:
-            self.module.fail_json(msg='Could not update role %s in realm %s: %s'
-                                  % (rolerep['name'], realm, str(e)))
+            self.fail_request(e, msg='Could not update role %s in realm %s: %s'
+                              % (rolerep['name'], realm, str(e)))
+
+    def get_role_composites(self, rolerep, clientid=None, realm='master'):
+        composite_url = ''
+        try:
+            if clientid is not None:
+                client = self.get_client_by_clientid(client_id=clientid, realm=realm)
+                cid = client['id']
+                composite_url = URL_CLIENT_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep["name"], safe=''))
+            else:
+                composite_url = URL_REALM_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, name=quote(rolerep["name"], safe=''))
+            # Get existing composites
+            return self._request_and_deserialize(composite_url, method='GET')
+        except Exception as e:
+            self.fail_request(e, msg='Could not get role %s composites in realm %s: %s'
+                              % (rolerep['name'], realm, str(e)))
+
+    def create_role_composites(self, rolerep, composites, clientid=None, realm='master'):
+        composite_url = ''
+        try:
+            if clientid is not None:
+                client = self.get_client_by_clientid(client_id=clientid, realm=realm)
+                cid = client['id']
+                composite_url = 
URL_CLIENT_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep["name"], safe=''))
+            else:
+                composite_url = URL_REALM_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, name=quote(rolerep["name"], safe=''))
+            # Get existing composites
+            # create new composites
+            return self._request(composite_url, method='POST', data=json.dumps(composites))
+        except Exception as e:
+            self.fail_request(e, msg='Could not create role %s composites in realm %s: %s'
+                              % (rolerep['name'], realm, str(e)))
+
+    def delete_role_composites(self, rolerep, composites, clientid=None, realm='master'):
+        composite_url = ''
+        try:
+            if clientid is not None:
+                client = self.get_client_by_clientid(client_id=clientid, realm=realm)
+                cid = client['id']
+                composite_url = URL_CLIENT_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep["name"], safe=''))
+            else:
+                composite_url = URL_REALM_ROLE_COMPOSITES.format(url=self.baseurl, realm=realm, name=quote(rolerep["name"], safe=''))
+            # Get existing composites
+            # delete existing composites
+            return self._request(composite_url, method='DELETE', data=json.dumps(composites))
+        except Exception as e:
+            self.fail_request(e, msg='Could not delete role %s composites in realm %s: %s'
+                              % (rolerep['name'], realm, str(e)))
+
+    def update_role_composites(self, rolerep, composites, clientid=None, realm='master'):
+        # Get existing composites
+        existing_composites = self.get_role_composites(rolerep=rolerep, clientid=clientid, realm=realm)
+        composites_to_be_created = []
+        composites_to_be_deleted = []
+        for composite in composites:
+            composite_found = False
+            existing_composite_client = None
+            for existing_composite in existing_composites:
+                if existing_composite["clientRole"]:
+                    existing_composite_client = self.get_client_by_id(existing_composite["containerId"], realm=realm)
+                    if ("client_id" in composite
+                            and composite['client_id'] is not None
+                            and existing_composite_client["clientId"] == composite["client_id"]
+                            
and composite["name"] == existing_composite["name"]): + composite_found = True + break + else: + if (("client_id" not in composite or composite['client_id'] is None) + and composite["name"] == existing_composite["name"]): + composite_found = True + break + if not composite_found and ('state' not in composite or composite['state'] == 'present'): + if "client_id" in composite and composite['client_id'] is not None: + client_roles = self.get_client_roles(clientid=composite['client_id'], realm=realm) + for client_role in client_roles: + if client_role['name'] == composite['name']: + composites_to_be_created.append(client_role) + break + else: + realm_role = self.get_realm_role(name=composite["name"], realm=realm) + composites_to_be_created.append(realm_role) + elif composite_found and 'state' in composite and composite['state'] == 'absent': + if "client_id" in composite and composite['client_id'] is not None: + client_roles = self.get_client_roles(clientid=composite['client_id'], realm=realm) + for client_role in client_roles: + if client_role['name'] == composite['name']: + composites_to_be_deleted.append(client_role) + break + else: + realm_role = self.get_realm_role(name=composite["name"], realm=realm) + composites_to_be_deleted.append(realm_role) + + if len(composites_to_be_created) > 0: + # create new composites + self.create_role_composites(rolerep=rolerep, composites=composites_to_be_created, clientid=clientid, realm=realm) + if len(composites_to_be_deleted) > 0: + # delete new composites + self.delete_role_composites(rolerep=rolerep, composites=composites_to_be_deleted, clientid=clientid, realm=realm) def delete_realm_role(self, name, realm='master'): """ Delete a realm role. @@ -1412,13 +1994,12 @@ class KeycloakAPI(object): :param name: The name of the role. :param realm: The realm in which this role resides, default "master". 
""" - role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name)) + role_url = URL_REALM_ROLE.format(url=self.baseurl, realm=realm, name=quote(name, safe='')) try: - return open_url(role_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(role_url, method='DELETE') except Exception as e: - self.module.fail_json(msg='Unable to delete role %s in realm %s: %s' - % (name, realm, str(e))) + self.fail_request(e, msg='Unable to delete role %s in realm %s: %s' + % (name, realm, str(e))) def get_client_roles(self, clientid, realm='master'): """ Obtains role representations for client roles in a specific client @@ -1433,15 +2014,13 @@ class KeycloakAPI(object): % (clientid, realm)) rolelist_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid) try: - return json.loads(to_native(open_url(rolelist_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(rolelist_url, method='GET') except ValueError as e: self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of roles for client %s in realm %s: %s' % (clientid, realm, str(e))) except Exception as e: - self.module.fail_json(msg='Could not obtain list of roles for client %s in realm %s: %s' - % (clientid, realm, str(e))) + self.fail_request(e, msg='Could not obtain list of roles for client %s in realm %s: %s' + % (clientid, realm, str(e))) def get_client_role(self, name, clientid, realm='master'): """ Fetch a keycloak client role from the provided realm using the role's name. 
@@ -1456,16 +2035,15 @@ class KeycloakAPI(object): if cid is None: self.module.fail_json(msg='Could not find client %s in realm %s' % (clientid, realm)) - role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name)) + role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name, safe='')) try: - return json.loads(to_native(open_url(role_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(role_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg='Could not fetch role %s in client %s of realm %s: %s' - % (name, clientid, realm, str(e))) + self.fail_request(e, msg='Could not fetch role %s in client %s of realm %s: %s' + % (name, clientid, realm, str(e))) except Exception as e: self.module.fail_json(msg='Could not fetch role %s for client %s in realm %s: %s' % (name, clientid, realm, str(e))) @@ -1484,11 +2062,28 @@ class KeycloakAPI(object): % (clientid, realm)) roles_url = URL_CLIENT_ROLES.format(url=self.baseurl, realm=realm, id=cid) try: - return open_url(roles_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(rolerep), validate_certs=self.validate_certs) + if "composites" in rolerep: + keycloak_compatible_composites = self.convert_role_composites(rolerep["composites"]) + rolerep["composites"] = keycloak_compatible_composites + return self._request(roles_url, method='POST', data=json.dumps(rolerep)) except Exception as e: - self.module.fail_json(msg='Could not create role %s for client %s in realm %s: %s' - % (rolerep['name'], clientid, realm, str(e))) + self.fail_request(e, msg='Could not create role %s for client %s in realm %s: %s' + % (rolerep['name'], clientid, realm, str(e))) + + def convert_role_composites(self, composites): + 
keycloak_compatible_composites = { + 'client': {}, + 'realm': [] + } + for composite in composites: + if 'state' not in composite or composite['state'] == 'present': + if "client_id" in composite and composite["client_id"] is not None: + if composite["client_id"] not in keycloak_compatible_composites["client"]: + keycloak_compatible_composites["client"][composite["client_id"]] = [] + keycloak_compatible_composites["client"][composite["client_id"]].append(composite["name"]) + else: + keycloak_compatible_composites["realm"].append(composite["name"]) + return keycloak_compatible_composites def update_client_role(self, rolerep, clientid, realm="master"): """ Update an existing client role. @@ -1502,13 +2097,19 @@ class KeycloakAPI(object): if cid is None: self.module.fail_json(msg='Could not find client %s in realm %s' % (clientid, realm)) - role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep['name'])) + role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(rolerep['name'], safe='')) try: - return open_url(role_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(rolerep), validate_certs=self.validate_certs) + composites = None + if "composites" in rolerep: + composites = copy.deepcopy(rolerep["composites"]) + del rolerep['composites'] + update_role_response = self._request(role_url, method='PUT', data=json.dumps(rolerep)) + if composites is not None: + self.update_role_composites(rolerep=rolerep, clientid=clientid, composites=composites, realm=realm) + return update_role_response except Exception as e: - self.module.fail_json(msg='Could not update role %s for client %s in realm %s: %s' - % (rolerep['name'], clientid, realm, str(e))) + self.fail_request(e, msg='Could not update role %s for client %s in realm %s: %s' + % (rolerep['name'], clientid, realm, str(e))) def delete_client_role(self, name, clientid, realm="master"): """ 
Delete a role. One of name or roleid must be provided. @@ -1521,17 +2122,16 @@ class KeycloakAPI(object): if cid is None: self.module.fail_json(msg='Could not find client %s in realm %s' % (clientid, realm)) - role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name)) + role_url = URL_CLIENT_ROLE.format(url=self.baseurl, realm=realm, id=cid, name=quote(name, safe='')) try: - return open_url(role_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(role_url, method='DELETE') except Exception as e: - self.module.fail_json(msg='Unable to delete role %s for client %s in realm %s: %s' - % (name, clientid, realm, str(e))) + self.fail_request(e, msg='Unable to delete role %s for client %s in realm %s: %s' + % (name, clientid, realm, str(e))) def get_authentication_flow_by_alias(self, alias, realm='master'): """ - Get an authentication flow by it's alias + Get an authentication flow by its alias :param alias: Alias of the authentication flow to get. :param realm: Realm. :return: Authentication flow representation. 
@@ -1539,16 +2139,14 @@ class KeycloakAPI(object): try: authentication_flow = {} # Check if the authentication flow exists on the Keycloak serveraders - authentications = json.load(open_url(URL_AUTHENTICATION_FLOWS.format(url=self.baseurl, realm=realm), method='GET', - http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, validate_certs=self.validate_certs)) + authentications = json.load(self._request(URL_AUTHENTICATION_FLOWS.format(url=self.baseurl, realm=realm), method='GET')) for authentication in authentications: if authentication["alias"] == alias: authentication_flow = authentication break return authentication_flow except Exception as e: - self.module.fail_json(msg="Unable get authentication flow %s: %s" % (alias, str(e))) + self.fail_request(e, msg="Unable get authentication flow %s: %s" % (alias, str(e))) def delete_authentication_flow_by_id(self, id, realm='master'): """ @@ -1560,11 +2158,10 @@ class KeycloakAPI(object): flow_url = URL_AUTHENTICATION_FLOW.format(url=self.baseurl, realm=realm, id=id) try: - return open_url(flow_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(flow_url, method='DELETE') except Exception as e: - self.module.fail_json(msg='Could not delete authentication flow %s in realm %s: %s' - % (id, realm, str(e))) + self.fail_request(e, msg='Could not delete authentication flow %s in realm %s: %s' + % (id, realm, str(e))) def copy_auth_flow(self, config, realm='master'): """ @@ -1577,31 +2174,25 @@ class KeycloakAPI(object): new_name = dict( newName=config["alias"] ) - open_url( + self._request( URL_AUTHENTICATION_FLOW_COPY.format( url=self.baseurl, realm=realm, - copyfrom=quote(config["copyFrom"])), + copyfrom=quote(config["copyFrom"], safe='')), method='POST', - http_agent=self.http_agent, headers=self.restheaders, - data=json.dumps(new_name), - timeout=self.connection_timeout, - 
validate_certs=self.validate_certs) + data=json.dumps(new_name)) flow_list = json.load( - open_url( + self._request( URL_AUTHENTICATION_FLOWS.format(url=self.baseurl, realm=realm), - method='GET', - http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs)) + method='GET')) for flow in flow_list: if flow["alias"] == config["alias"]: return flow return None except Exception as e: - self.module.fail_json(msg='Could not copy authentication flow %s in realm %s: %s' - % (config["alias"], realm, str(e))) + self.fail_request(e, msg='Could not copy authentication flow %s in realm %s: %s' + % (config["alias"], realm, str(e))) def create_empty_auth_flow(self, config, realm='master'): """ @@ -1617,31 +2208,25 @@ class KeycloakAPI(object): description=config["description"], topLevel=True ) - open_url( + self._request( URL_AUTHENTICATION_FLOWS.format( url=self.baseurl, realm=realm), method='POST', - http_agent=self.http_agent, headers=self.restheaders, - data=json.dumps(new_flow), - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + data=json.dumps(new_flow)) flow_list = json.load( - open_url( + self._request( URL_AUTHENTICATION_FLOWS.format( url=self.baseurl, realm=realm), - method='GET', - http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs)) + method='GET')) for flow in flow_list: if flow["alias"] == config["alias"]: return flow return None except Exception as e: - self.module.fail_json(msg='Could not create empty authentication flow %s in realm %s: %s' - % (config["alias"], realm, str(e))) + self.fail_request(e, msg='Could not create empty authentication flow %s in realm %s: %s' + % (config["alias"], realm, str(e))) def update_authentication_executions(self, flowAlias, updatedExec, realm='master'): """ Update authentication executions @@ -1651,16 +2236,16 @@ class KeycloakAPI(object): :return: HTTPResponse 
object on success """ try: - open_url( + self._request( URL_AUTHENTICATION_FLOW_EXECUTIONS.format( url=self.baseurl, realm=realm, - flowalias=quote(flowAlias)), + flowalias=quote(flowAlias, safe='')), method='PUT', - http_agent=self.http_agent, headers=self.restheaders, - data=json.dumps(updatedExec), - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + data=json.dumps(updatedExec)) + except HTTPError as e: + self.fail_request(e, msg="Unable to update execution '%s': %s: %s %s" + % (flowAlias, repr(e), ";".join([e.url, e.msg, str(e.code), str(e.hdrs)]), str(updatedExec))) except Exception as e: self.module.fail_json(msg="Unable to update executions %s: %s" % (updatedExec, str(e))) @@ -1672,20 +2257,34 @@ class KeycloakAPI(object): :return: HTTPResponse object on success """ try: - open_url( + self._request( URL_AUTHENTICATION_EXECUTION_CONFIG.format( url=self.baseurl, realm=realm, id=executionId), method='POST', - http_agent=self.http_agent, headers=self.restheaders, - data=json.dumps(authenticationConfig), - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + data=json.dumps(authenticationConfig)) except Exception as e: - self.module.fail_json(msg="Unable to add authenticationConfig %s: %s" % (executionId, str(e))) + self.fail_request(e, msg="Unable to add authenticationConfig %s: %s" % (executionId, str(e))) - def create_subflow(self, subflowName, flowAlias, realm='master'): + def delete_authentication_config(self, configId, realm='master'): + """ Delete authenticator config + + :param configId: id of authentication config + :param realm: realm of authentication config to be deleted + """ + try: + # Send a DELETE request to remove the specified authentication config from the Keycloak server. 
+ self._request( + URL_AUTHENTICATION_CONFIG.format( + url=self.baseurl, + realm=realm, + id=configId), + method='DELETE') + except Exception as e: + self.fail_request(e, msg="Unable to delete authentication config %s: %s" % (configId, str(e))) + + def create_subflow(self, subflowName, flowAlias, realm='master', flowType='basic-flow'): """ Create new sublow on the flow :param subflowName: name of the subflow to create @@ -1696,19 +2295,16 @@ class KeycloakAPI(object): newSubFlow = {} newSubFlow["alias"] = subflowName newSubFlow["provider"] = "registration-page-form" - newSubFlow["type"] = "basic-flow" - open_url( + newSubFlow["type"] = flowType + self._request( URL_AUTHENTICATION_FLOW_EXECUTIONS_FLOW.format( url=self.baseurl, realm=realm, - flowalias=quote(flowAlias)), + flowalias=quote(flowAlias, safe='')), method='POST', - http_agent=self.http_agent, headers=self.restheaders, - data=json.dumps(newSubFlow), - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + data=json.dumps(newSubFlow)) except Exception as e: - self.module.fail_json(msg="Unable to create new subflow %s: %s" % (subflowName, str(e))) + self.fail_request(e, msg="Unable to create new subflow %s: %s" % (subflowName, str(e))) def create_execution(self, execution, flowAlias, realm='master'): """ Create new execution on the flow @@ -1721,18 +2317,18 @@ class KeycloakAPI(object): newExec = {} newExec["provider"] = execution["providerId"] newExec["requirement"] = execution["requirement"] - open_url( + self._request( URL_AUTHENTICATION_FLOW_EXECUTIONS_EXECUTION.format( url=self.baseurl, realm=realm, - flowalias=quote(flowAlias)), + flowalias=quote(flowAlias, safe='')), method='POST', - http_agent=self.http_agent, headers=self.restheaders, - data=json.dumps(newExec), - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + data=json.dumps(newExec)) + except HTTPError as e: + self.fail_request(e, msg="Unable to create new execution '%s' %s: %s: %s %s" + % (flowAlias, 
execution["providerId"], repr(e), ";".join([e.url, e.msg, str(e.code), str(e.hdrs)]), str(newExec))) except Exception as e: - self.module.fail_json(msg="Unable to create new execution %s: %s" % (execution["provider"], str(e))) + self.module.fail_json(msg="Unable to create new execution '%s' %s: %s" % (flowAlias, execution["providerId"], repr(e))) def change_execution_priority(self, executionId, diff, realm='master'): """ Raise or lower execution priority of diff time @@ -1745,28 +2341,22 @@ class KeycloakAPI(object): try: if diff > 0: for i in range(diff): - open_url( + self._request( URL_AUTHENTICATION_EXECUTION_RAISE_PRIORITY.format( url=self.baseurl, realm=realm, id=executionId), - method='POST', - http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + method='POST') elif diff < 0: for i in range(-diff): - open_url( + self._request( URL_AUTHENTICATION_EXECUTION_LOWER_PRIORITY.format( url=self.baseurl, realm=realm, id=executionId), - method='POST', - http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs) + method='POST') except Exception as e: - self.module.fail_json(msg="Unable to change execution priority %s: %s" % (executionId, str(e))) + self.fail_request(e, msg="Unable to change execution priority %s: %s" % (executionId, str(e))) def get_executions_representation(self, config, realm='master'): """ @@ -1778,33 +2368,128 @@ class KeycloakAPI(object): try: # Get executions created executions = json.load( - open_url( + self._request( URL_AUTHENTICATION_FLOW_EXECUTIONS.format( url=self.baseurl, realm=realm, - flowalias=quote(config["alias"])), - method='GET', - http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs)) + flowalias=quote(config["alias"], safe='')), + method='GET')) for execution in executions: if "authenticationConfig" in execution: 
execConfigId = execution["authenticationConfig"] execConfig = json.load( - open_url( + self._request( URL_AUTHENTICATION_CONFIG.format( url=self.baseurl, realm=realm, id=execConfigId), - method='GET', - http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs)) + method='GET')) execution["authenticationConfig"] = execConfig return executions except Exception as e: - self.module.fail_json(msg='Could not get executions for authentication flow %s in realm %s: %s' - % (config["alias"], realm, str(e))) + self.fail_request(e, msg='Could not get executions for authentication flow %s in realm %s: %s' + % (config["alias"], realm, str(e))) + + def get_required_actions(self, realm='master'): + """ + Get required actions. + :param realm: Realm name (not id). + :return: List of representations of the required actions. + """ + + try: + required_actions = json.load( + self._request( + URL_AUTHENTICATION_REQUIRED_ACTIONS.format( + url=self.baseurl, + realm=realm + ), + method='GET' + ) + ) + + return required_actions + except Exception: + return None + + def register_required_action(self, rep, realm='master'): + """ + Register required action. + :param rep: JSON containing 'providerId', and 'name' attributes. + :param realm: Realm name (not id). + :return: Representation of the required action. + """ + + data = { + 'name': rep['name'], + 'providerId': rep['providerId'] + } + + try: + return self._request( + URL_AUTHENTICATION_REGISTER_REQUIRED_ACTION.format( + url=self.baseurl, + realm=realm + ), + method='POST', + data=json.dumps(data), + ) + except Exception as e: + self.fail_request( + e, + msg='Unable to register required action %s in realm %s: %s' + % (rep["name"], realm, str(e)) + ) + + def update_required_action(self, alias, rep, realm='master'): + """ + Update required action. + :param alias: Alias of required action. + :param rep: JSON describing new state of required action. 
+ :param realm: Realm name (not id). + :return: HTTPResponse object on success. + """ + + try: + return self._request( + URL_AUTHENTICATION_REQUIRED_ACTIONS_ALIAS.format( + url=self.baseurl, + alias=quote(alias, safe=''), + realm=realm + ), + method='PUT', + data=json.dumps(rep), + ) + except Exception as e: + self.fail_request( + e, + msg='Unable to update required action %s in realm %s: %s' + % (alias, realm, str(e)) + ) + + def delete_required_action(self, alias, realm='master'): + """ + Delete required action. + :param alias: Alias of required action. + :param realm: Realm name (not id). + :return: HTTPResponse object on success. + """ + + try: + return self._request( + URL_AUTHENTICATION_REQUIRED_ACTIONS_ALIAS.format( + url=self.baseurl, + alias=quote(alias, safe=''), + realm=realm + ), + method='DELETE', + ) + except Exception as e: + self.fail_request( + e, + msg='Unable to delete required action %s in realm %s: %s' + % (alias, realm, str(e)) + ) def get_identity_providers(self, realm='master'): """ Fetch representations for identity providers in a realm @@ -1813,14 +2498,13 @@ class KeycloakAPI(object): """ idps_url = URL_IDENTITY_PROVIDERS.format(url=self.baseurl, realm=realm) try: - return json.loads(to_native(open_url(idps_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(idps_url, method='GET') except ValueError as e: self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of identity providers for realm %s: %s' % (realm, str(e))) except Exception as e: - self.module.fail_json(msg='Could not obtain list of identity providers for realm %s: %s' - % (realm, str(e))) + self.fail_request(e, msg='Could not obtain list of identity providers for realm %s: %s' + % (realm, str(e))) def get_identity_provider(self, alias, realm='master'): """ Fetch identity provider representation from a realm using 
the idp's alias. @@ -1830,14 +2514,13 @@ class KeycloakAPI(object): """ idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=alias) try: - return json.loads(to_native(open_url(idp_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(idp_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg='Could not fetch identity provider %s in realm %s: %s' - % (alias, realm, str(e))) + self.fail_request(e, msg='Could not fetch identity provider %s in realm %s: %s' + % (alias, realm, str(e))) except Exception as e: self.module.fail_json(msg='Could not fetch identity provider %s in realm %s: %s' % (alias, realm, str(e))) @@ -1850,11 +2533,10 @@ class KeycloakAPI(object): """ idps_url = URL_IDENTITY_PROVIDERS.format(url=self.baseurl, realm=realm) try: - return open_url(idps_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(idprep), validate_certs=self.validate_certs) + return self._request(idps_url, method='POST', data=json.dumps(idprep)) except Exception as e: - self.module.fail_json(msg='Could not create identity provider %s in realm %s: %s' - % (idprep['alias'], realm, str(e))) + self.fail_request(e, msg='Could not create identity provider %s in realm %s: %s' + % (idprep['alias'], realm, str(e))) def update_identity_provider(self, idprep, realm='master'): """ Update an existing identity provider. 
@@ -1864,11 +2546,10 @@ class KeycloakAPI(object): """ idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=idprep['alias']) try: - return open_url(idp_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(idprep), validate_certs=self.validate_certs) + return self._request(idp_url, method='PUT', data=json.dumps(idprep)) except Exception as e: - self.module.fail_json(msg='Could not update identity provider %s in realm %s: %s' - % (idprep['alias'], realm, str(e))) + self.fail_request(e, msg='Could not update identity provider %s in realm %s: %s' + % (idprep['alias'], realm, str(e))) def delete_identity_provider(self, alias, realm='master'): """ Delete an identity provider. @@ -1877,11 +2558,10 @@ class KeycloakAPI(object): """ idp_url = URL_IDENTITY_PROVIDER.format(url=self.baseurl, realm=realm, alias=alias) try: - return open_url(idp_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(idp_url, method='DELETE') except Exception as e: - self.module.fail_json(msg='Unable to delete identity provider %s in realm %s: %s' - % (alias, realm, str(e))) + self.fail_request(e, msg='Unable to delete identity provider %s in realm %s: %s' + % (alias, realm, str(e))) def get_identity_provider_mappers(self, alias, realm='master'): """ Fetch representations for identity provider mappers @@ -1891,15 +2571,30 @@ class KeycloakAPI(object): """ mappers_url = URL_IDENTITY_PROVIDER_MAPPERS.format(url=self.baseurl, realm=realm, alias=alias) try: - return json.loads(to_native(open_url(mappers_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(mappers_url, method='GET') except ValueError as e: self.module.fail_json(msg='API returned incorrect 
JSON when trying to obtain list of identity provider mappers for idp %s in realm %s: %s' % (alias, realm, str(e))) except Exception as e: - self.module.fail_json(msg='Could not obtain list of identity provider mappers for idp %s in realm %s: %s' - % (alias, realm, str(e))) + self.fail_request(e, msg='Could not obtain list of identity provider mappers for idp %s in realm %s: %s' + % (alias, realm, str(e))) + + def fetch_idp_endpoints_import_config_url(self, fromUrl, providerId='oidc', realm='master'): + """ Import an identity provider configuration through Keycloak server from a well-known URL. + :param fromUrl: URL to import the identity provider configuration from. + "param providerId: Provider ID of the identity provider to import, default 'oidc'. + :param realm: Realm + :return: IDP endpoins. + """ + try: + payload = { + "providerId": providerId, + "fromUrl": fromUrl + } + idps_url = URL_IDENTITY_PROVIDER_IMPORT.format(url=self.baseurl, realm=realm) + return self._request_and_deserialize(idps_url, method='POST', data=json.dumps(payload)) + except Exception as e: + self.fail_request(e, msg='Could not import the IdP config in realm %s: %s' % (realm, str(e))) def get_identity_provider_mapper(self, mid, alias, realm='master'): """ Fetch identity provider representation from a realm using the idp's alias. 
@@ -1910,15 +2605,13 @@ class KeycloakAPI(object): """ mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mid) try: - return json.loads(to_native(open_url(mapper_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, - timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(mapper_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg='Could not fetch mapper %s for identity provider %s in realm %s: %s' - % (mid, alias, realm, str(e))) + self.fail_request(e, msg='Could not fetch mapper %s for identity provider %s in realm %s: %s' + % (mid, alias, realm, str(e))) except Exception as e: self.module.fail_json(msg='Could not fetch mapper %s for identity provider %s in realm %s: %s' % (mid, alias, realm, str(e))) @@ -1932,11 +2625,10 @@ class KeycloakAPI(object): """ mappers_url = URL_IDENTITY_PROVIDER_MAPPERS.format(url=self.baseurl, realm=realm, alias=alias) try: - return open_url(mappers_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(mapper), validate_certs=self.validate_certs) + return self._request(mappers_url, method='POST', data=json.dumps(mapper)) except Exception as e: - self.module.fail_json(msg='Could not create identity provider mapper %s for idp %s in realm %s: %s' - % (mapper['name'], alias, realm, str(e))) + self.fail_request(e, msg='Could not create identity provider mapper %s for idp %s in realm %s: %s' + % (mapper['name'], alias, realm, str(e))) def update_identity_provider_mapper(self, mapper, alias, realm='master'): """ Update an existing identity provider. 
@@ -1947,11 +2639,10 @@ class KeycloakAPI(object): """ mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mapper['id']) try: - return open_url(mapper_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(mapper), validate_certs=self.validate_certs) + return self._request(mapper_url, method='PUT', data=json.dumps(mapper)) except Exception as e: - self.module.fail_json(msg='Could not update mapper %s for identity provider %s in realm %s: %s' - % (mapper['id'], alias, realm, str(e))) + self.fail_request(e, msg='Could not update mapper %s for identity provider %s in realm %s: %s' + % (mapper['id'], alias, realm, str(e))) def delete_identity_provider_mapper(self, mid, alias, realm='master'): """ Delete an identity provider. @@ -1961,11 +2652,10 @@ class KeycloakAPI(object): """ mapper_url = URL_IDENTITY_PROVIDER_MAPPER.format(url=self.baseurl, realm=realm, alias=alias, id=mid) try: - return open_url(mapper_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(mapper_url, method='DELETE') except Exception as e: - self.module.fail_json(msg='Unable to delete mapper %s for identity provider %s in realm %s: %s' - % (mid, alias, realm, str(e))) + self.fail_request(e, msg='Unable to delete mapper %s for identity provider %s in realm %s: %s' + % (mid, alias, realm, str(e))) def get_components(self, filter=None, realm='master'): """ Fetch representations for components in a realm @@ -1978,14 +2668,13 @@ class KeycloakAPI(object): comps_url += '?%s' % filter try: - return json.loads(to_native(open_url(comps_url, method='GET', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(comps_url, method='GET') except ValueError as e: 
self.module.fail_json(msg='API returned incorrect JSON when trying to obtain list of components for realm %s: %s' % (realm, str(e))) except Exception as e: - self.module.fail_json(msg='Could not obtain list of components for realm %s: %s' - % (realm, str(e))) + self.fail_request(e, msg='Could not obtain list of components for realm %s: %s' + % (realm, str(e))) def get_component(self, cid, realm='master'): """ Fetch component representation from a realm using its cid. @@ -1995,14 +2684,13 @@ class KeycloakAPI(object): """ comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid) try: - return json.loads(to_native(open_url(comp_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) + return self._request_and_deserialize(comp_url, method="GET") except HTTPError as e: if e.code == 404: return None else: - self.module.fail_json(msg='Could not fetch component %s in realm %s: %s' - % (cid, realm, str(e))) + self.fail_request(e, msg='Could not fetch component %s in realm %s: %s' + % (cid, realm, str(e))) except Exception as e: self.module.fail_json(msg='Could not fetch component %s in realm %s: %s' % (cid, realm, str(e))) @@ -2015,17 +2703,15 @@ class KeycloakAPI(object): """ comps_url = URL_COMPONENTS.format(url=self.baseurl, realm=realm) try: - resp = open_url(comps_url, method='POST', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(comprep), validate_certs=self.validate_certs) + resp = self._request(comps_url, method='POST', data=json.dumps(comprep)) comp_url = resp.getheader('Location') if comp_url is None: self.module.fail_json(msg='Could not create component in realm %s: %s' % (realm, 'unexpected response')) - return json.loads(to_native(open_url(comp_url, method="GET", http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs).read())) 
+ return self._request_and_deserialize(comp_url, method="GET") except Exception as e: - self.module.fail_json(msg='Could not create component in realm %s: %s' - % (realm, str(e))) + self.fail_request(e, msg='Could not create component in realm %s: %s' + % (realm, str(e))) def update_component(self, comprep, realm='master'): """ Update an existing component. @@ -2038,11 +2724,10 @@ class KeycloakAPI(object): self.module.fail_json(msg='Cannot update component without id') comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid) try: - return open_url(comp_url, method='PUT', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - data=json.dumps(comprep), validate_certs=self.validate_certs) + return self._request(comp_url, method='PUT', data=json.dumps(comprep)) except Exception as e: - self.module.fail_json(msg='Could not update component %s in realm %s: %s' - % (cid, realm, str(e))) + self.fail_request(e, msg='Could not update component %s in realm %s: %s' + % (cid, realm, str(e))) def delete_component(self, cid, realm='master'): """ Delete an component. 
@@ -2051,8 +2736,505 @@ class KeycloakAPI(object): """ comp_url = URL_COMPONENT.format(url=self.baseurl, realm=realm, id=cid) try: - return open_url(comp_url, method='DELETE', http_agent=self.http_agent, headers=self.restheaders, timeout=self.connection_timeout, - validate_certs=self.validate_certs) + return self._request(comp_url, method='DELETE') except Exception as e: - self.module.fail_json(msg='Unable to delete component %s in realm %s: %s' - % (cid, realm, str(e))) + self.fail_request(e, msg='Unable to delete component %s in realm %s: %s' + % (cid, realm, str(e))) + + def get_authz_authorization_scope_by_name(self, name, client_id, realm): + url = URL_AUTHZ_AUTHORIZATION_SCOPES.format(url=self.baseurl, client_id=client_id, realm=realm) + search_url = "%s/search?name=%s" % (url, quote(name, safe='')) + + try: + return self._request_and_deserialize(search_url, method='GET') + except Exception: + return False + + def create_authz_authorization_scope(self, payload, client_id, realm): + """Create an authorization scope for a Keycloak client""" + url = URL_AUTHZ_AUTHORIZATION_SCOPES.format(url=self.baseurl, client_id=client_id, realm=realm) + + try: + return self._request(url, method='POST', data=json.dumps(payload)) + except Exception as e: + self.fail_request(e, msg='Could not create authorization scope %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e))) + + def update_authz_authorization_scope(self, payload, id, client_id, realm): + """Update an authorization scope for a Keycloak client""" + url = URL_AUTHZ_AUTHORIZATION_SCOPE.format(url=self.baseurl, id=id, client_id=client_id, realm=realm) + + try: + return self._request(url, method='PUT', data=json.dumps(payload)) + except Exception as e: + self.fail_request(e, msg='Could not create update scope %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e))) + + def remove_authz_authorization_scope(self, id, client_id, realm): + """Remove an authorization scope 
from a Keycloak client""" + url = URL_AUTHZ_AUTHORIZATION_SCOPE.format(url=self.baseurl, id=id, client_id=client_id, realm=realm) + + try: + return self._request(url, method='DELETE') + except Exception as e: + self.fail_request(e, msg='Could not delete scope %s for client %s in realm %s: %s' % (id, client_id, realm, str(e))) + + def get_user_by_id(self, user_id, realm='master'): + """ + Get a User by its ID. + :param user_id: ID of the user. + :param realm: Realm + :return: Representation of the user. + """ + try: + user_url = URL_USER.format( + url=self.baseurl, + realm=realm, + id=user_id) + userrep = json.load( + self._request( + user_url, + method='GET')) + return userrep + except Exception as e: + self.fail_request(e, msg='Could not get user %s in realm %s: %s' + % (user_id, realm, str(e))) + + def create_user(self, userrep, realm='master'): + """ + Create a new User. + :param userrep: Representation of the user to create + :param realm: Realm + :return: Representation of the user created. 
+ """ + try: + if 'attributes' in userrep and isinstance(userrep['attributes'], list): + attributes = copy.deepcopy(userrep['attributes']) + userrep['attributes'] = self.convert_user_attributes_to_keycloak_dict(attributes=attributes) + users_url = URL_USERS.format( + url=self.baseurl, + realm=realm) + self._request(users_url, + method='POST', + data=json.dumps(userrep)) + created_user = self.get_user_by_username( + username=userrep['username'], + realm=realm) + return created_user + except Exception as e: + self.fail_request(e, msg='Could not create user %s in realm %s: %s' + % (userrep['username'], realm, str(e))) + + def convert_user_attributes_to_keycloak_dict(self, attributes): + keycloak_user_attributes_dict = {} + for attribute in attributes: + if ('state' not in attribute or attribute['state'] == 'present') and 'name' in attribute: + keycloak_user_attributes_dict[attribute['name']] = attribute['values'] if 'values' in attribute else [] + return keycloak_user_attributes_dict + + def convert_keycloak_user_attributes_dict_to_module_list(self, attributes): + module_attributes_list = [] + for key in attributes: + attr = {} + attr['name'] = key + attr['values'] = attributes[key] + module_attributes_list.append(attr) + return module_attributes_list + + def update_user(self, userrep, realm='master'): + """ + Update a User. + :param userrep: Representation of the user to update. This representation must include the ID of the user. + :param realm: Realm + :return: Representation of the updated user. 
+ """ + try: + if 'attributes' in userrep and isinstance(userrep['attributes'], list): + attributes = copy.deepcopy(userrep['attributes']) + userrep['attributes'] = self.convert_user_attributes_to_keycloak_dict(attributes=attributes) + user_url = URL_USER.format( + url=self.baseurl, + realm=realm, + id=userrep["id"]) + self._request( + user_url, + method='PUT', + data=json.dumps(userrep)) + updated_user = self.get_user_by_id( + user_id=userrep['id'], + realm=realm) + return updated_user + except Exception as e: + self.fail_request(e, msg='Could not update user %s in realm %s: %s' + % (userrep['username'], realm, str(e))) + + def delete_user(self, user_id, realm='master'): + """ + Delete a User. + :param user_id: ID of the user to be deleted + :param realm: Realm + :return: HTTP response. + """ + try: + user_url = URL_USER.format( + url=self.baseurl, + realm=realm, + id=user_id) + return self._request( + user_url, + method='DELETE') + except Exception as e: + self.fail_request(e, msg='Could not delete user %s in realm %s: %s' + % (user_id, realm, str(e))) + + def get_user_groups(self, user_id, realm='master'): + """ + Get the group names for a user. + :param user_id: User ID + :param realm: Realm + :return: The client group names as a list of strings. + """ + user_groups = self.get_user_group_details(user_id, realm) + return [user_group['name'] for user_group in user_groups if 'name' in user_group] + + def get_user_group_details(self, user_id, realm='master'): + """ + Get the group details for a user. + :param user_id: User ID + :param realm: Realm + :return: The client group details as a list of dictionaries. 
+ """ + try: + user_groups_url = URL_USER_GROUPS.format(url=self.baseurl, realm=realm, id=user_id) + return self._request_and_deserialize(user_groups_url, method='GET') + except Exception as e: + self.fail_request(e, msg='Could not get groups for user %s in realm %s: %s' + % (user_id, realm, str(e))) + + def add_user_in_group(self, user_id, group_id, realm='master'): + """DEPRECATED: Call add_user_to_group(...) instead. This method is scheduled for removal in community.general 13.0.0.""" + return self.add_user_to_group(user_id, group_id, realm) + + def add_user_to_group(self, user_id, group_id, realm='master'): + """ + Add a user to a group. + :param user_id: User ID + :param group_id: Group Id to add the user to. + :param realm: Realm + :return: HTTP Response + """ + try: + user_group_url = URL_USER_GROUP.format( + url=self.baseurl, + realm=realm, + id=user_id, + group_id=group_id) + return self._request( + user_group_url, + method='PUT') + except Exception as e: + self.fail_request(e, msg='Could not add user %s to group %s in realm %s: %s' + % (user_id, group_id, realm, str(e))) + + def remove_user_from_group(self, user_id, group_id, realm='master'): + """ + Remove a user from a group for a user. + :param user_id: User ID + :param group_id: Group Id to add the user to. + :param realm: Realm + :return: HTTP response + """ + try: + user_group_url = URL_USER_GROUP.format( + url=self.baseurl, + realm=realm, + id=user_id, + group_id=group_id) + return self._request( + user_group_url, + method='DELETE') + except Exception as e: + self.fail_request(e, msg='Could not remove user %s from group %s in realm %s: %s' + % (user_id, group_id, realm, str(e))) + + def update_user_groups_membership(self, userrep, groups, realm='master'): + """ + Update user's group membership + :param userrep: Representation of the user. This representation must include the ID. + :param realm: Realm + :return: True if group membership has been changed. False Otherwise. 
+ """ + try: + groups_to_add, groups_to_remove = self.extract_groups_to_add_to_and_remove_from_user(groups) + if not groups_to_add and not groups_to_remove: + return False + + user_groups = self.get_user_group_details(user_id=userrep['id'], realm=realm) + user_group_names = [user_group['name'] for user_group in user_groups if 'name' in user_group] + user_group_paths = [user_group['path'] for user_group in user_groups if 'path' in user_group] + + groups_to_add = [group_to_add for group_to_add in groups_to_add + if group_to_add not in user_group_names and group_to_add not in user_group_paths] + groups_to_remove = [group_to_remove for group_to_remove in groups_to_remove + if group_to_remove in user_group_names or group_to_remove in user_group_paths] + if not groups_to_add and not groups_to_remove: + return False + + for group_to_add in groups_to_add: + realm_group = self.find_group_by_path(group_to_add, realm=realm) + if realm_group: + self.add_user_to_group(user_id=userrep['id'], group_id=realm_group['id'], realm=realm) + + for group_to_remove in groups_to_remove: + realm_group = self.find_group_by_path(group_to_remove, realm=realm) + if realm_group: + self.remove_user_from_group(user_id=userrep['id'], group_id=realm_group['id'], realm=realm) + + return True + except Exception as e: + self.module.fail_json(msg='Could not update group membership for user %s in realm %s: %s' + % (userrep['username'], realm, e)) + + def extract_groups_to_add_to_and_remove_from_user(self, groups): + groups_to_add = [] + groups_to_remove = [] + if isinstance(groups, list): + for group in groups: + group_name = group['name'] if isinstance(group, dict) and 'name' in group else group + if isinstance(group, dict): + if 'state' not in group or group['state'] == 'present': + groups_to_add.append(group_name) + else: + groups_to_remove.append(group_name) + return groups_to_add, groups_to_remove + + def find_group_by_path(self, target, realm='master'): + """ + Finds a realm group by path, e.g. 
'/my/group'. + The path is formed by prepending a '/' character to `target` unless it's already present. + This adds support for finding top level groups by name and subgroups by path. + """ + groups = self.get_groups(realm=realm) + path = target if target.startswith('/') else '/' + target + for segment in path.split('/'): + if not segment: + continue + abort = True + for group in groups: + if group['path'] == path: + return self.get_group_by_groupid(group['id'], realm=realm) + if group['name'] == segment: + groups = self.get_subgroups(group, realm=realm) + abort = False + break + if abort: + break + return None + + def convert_user_group_list_of_str_to_list_of_dict(self, groups): + list_of_groups = [] + if isinstance(groups, list) and len(groups) > 0: + for group in groups: + if isinstance(group, str): + group_dict = {} + group_dict['name'] = group + list_of_groups.append(group_dict) + return list_of_groups + + def create_authz_custom_policy(self, policy_type, payload, client_id, realm): + """Create a custom policy for a Keycloak client""" + url = URL_AUTHZ_CUSTOM_POLICY.format(url=self.baseurl, policy_type=policy_type, client_id=client_id, realm=realm) + + try: + return self._request(url, method='POST', data=json.dumps(payload)) + except Exception as e: + self.fail_request(e, msg='Could not create permission %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e))) + + def remove_authz_custom_policy(self, policy_id, client_id, realm): + """Remove a custom policy from a Keycloak client""" + url = URL_AUTHZ_CUSTOM_POLICIES.format(url=self.baseurl, client_id=client_id, realm=realm) + delete_url = "%s/%s" % (url, policy_id) + + try: + return self._request(delete_url, method='DELETE') + except Exception as e: + self.fail_request(e, msg='Could not delete custom policy %s for client %s in realm %s: %s' % (id, client_id, realm, str(e))) + + def get_authz_permission_by_name(self, name, client_id, realm): + """Get authorization permission by name""" 
+ url = URL_AUTHZ_POLICIES.format(url=self.baseurl, client_id=client_id, realm=realm) + search_url = "%s/search?name=%s" % (url, name.replace(' ', '%20')) + + try: + return self._request_and_deserialize(search_url, method='GET') + except Exception: + return False + + def create_authz_permission(self, payload, permission_type, client_id, realm): + """Create an authorization permission for a Keycloak client""" + url = URL_AUTHZ_PERMISSIONS.format(url=self.baseurl, permission_type=permission_type, client_id=client_id, realm=realm) + + try: + return self._request(url, method='POST', data=json.dumps(payload)) + except Exception as e: + self.fail_request(e, msg='Could not create permission %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e))) + + def remove_authz_permission(self, id, client_id, realm): + """Create an authorization permission for a Keycloak client""" + url = URL_AUTHZ_POLICY.format(url=self.baseurl, id=id, client_id=client_id, realm=realm) + + try: + return self._request(url, method='DELETE') + except Exception as e: + self.fail_request(e, msg='Could not delete permission %s for client %s in realm %s: %s' % (id, client_id, realm, str(e))) + + def update_authz_permission(self, payload, permission_type, id, client_id, realm): + """Update a permission for a Keycloak client""" + url = URL_AUTHZ_PERMISSION.format(url=self.baseurl, permission_type=permission_type, id=id, client_id=client_id, realm=realm) + + try: + return self._request(url, method='PUT', data=json.dumps(payload)) + except Exception as e: + self.fail_request(e, msg='Could not create update permission %s for client %s in realm %s: %s' % (payload['name'], client_id, realm, str(e))) + + def get_authz_resource_by_name(self, name, client_id, realm): + """Get authorization resource by name""" + url = URL_AUTHZ_RESOURCES.format(url=self.baseurl, client_id=client_id, realm=realm) + search_url = "%s/search?name=%s" % (url, name.replace(' ', '%20')) + + try: + return 
self._request_and_deserialize(search_url, method='GET') + except Exception: + return False + + def get_authz_policy_by_name(self, name, client_id, realm): + """Get authorization policy by name""" + url = URL_AUTHZ_POLICIES.format(url=self.baseurl, client_id=client_id, realm=realm) + search_url = "%s/search?name=%s&permission=false" % (url, name.replace(' ', '%20')) + + try: + return self._request_and_deserialize(search_url, method='GET') + except Exception: + return False + + def get_client_role_scope_from_client(self, clientid, clientscopeid, realm="master"): + """ Fetch the roles associated with the client's scope for a specific client on the Keycloak server. + :param clientid: ID of the client from which to obtain the associated roles. + :param clientscopeid: ID of the client who owns the roles. + :param realm: Realm from which to obtain the scope. + :return: The client scope of roles from specified client. + """ + client_role_scope_url = URL_CLIENT_ROLE_SCOPE_CLIENTS.format(url=self.baseurl, realm=realm, id=clientid, scopeid=clientscopeid) + try: + return self._request_and_deserialize(client_role_scope_url, method='GET') + except Exception as e: + self.fail_request(e, msg='Could not fetch roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) + + def update_client_role_scope_from_client(self, payload, clientid, clientscopeid, realm="master"): + """ Update and fetch the roles associated with the client's scope on the Keycloak server. + :param payload: List of roles to be added to the scope. + :param clientid: ID of the client to update scope. + :param clientscopeid: ID of the client who owns the roles. + :param realm: Realm from which to obtain the clients. + :return: The client scope of roles from specified client. 
+ """ + client_role_scope_url = URL_CLIENT_ROLE_SCOPE_CLIENTS.format(url=self.baseurl, realm=realm, id=clientid, scopeid=clientscopeid) + try: + self._request(client_role_scope_url, method='POST', data=json.dumps(payload)) + + except Exception as e: + self.fail_request(e, msg='Could not update roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) + + return self.get_client_role_scope_from_client(clientid, clientscopeid, realm) + + def delete_client_role_scope_from_client(self, payload, clientid, clientscopeid, realm="master"): + """ Delete the roles contains in the payload from the client's scope on the Keycloak server. + :param payload: List of roles to be deleted. + :param clientid: ID of the client to delete roles from scope. + :param clientscopeid: ID of the client who owns the roles. + :param realm: Realm from which to obtain the clients. + :return: The client scope of roles from specified client. + """ + client_role_scope_url = URL_CLIENT_ROLE_SCOPE_CLIENTS.format(url=self.baseurl, realm=realm, id=clientid, scopeid=clientscopeid) + try: + self._request(client_role_scope_url, method='DELETE', data=json.dumps(payload)) + + except Exception as e: + self.fail_request(e, msg='Could not delete roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) + + return self.get_client_role_scope_from_client(clientid, clientscopeid, realm) + + def get_client_role_scope_from_realm(self, clientid, realm="master"): + """ Fetch the realm roles from the client's scope on the Keycloak server. + :param clientid: ID of the client from which to obtain the associated realm roles. + :param realm: Realm from which to obtain the clients. + :return: The client realm roles scope. 
+ """ + client_role_scope_url = URL_CLIENT_ROLE_SCOPE_REALM.format(url=self.baseurl, realm=realm, id=clientid) + try: + return self._request_and_deserialize(client_role_scope_url, method='GET') + except Exception as e: + self.fail_request(e, msg='Could not fetch roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) + + def update_client_role_scope_from_realm(self, payload, clientid, realm="master"): + """ Update and fetch the realm roles from the client's scope on the Keycloak server. + :param payload: List of realm roles to add. + :param clientid: ID of the client to update scope. + :param realm: Realm from which to obtain the clients. + :return: The client realm roles scope. + """ + client_role_scope_url = URL_CLIENT_ROLE_SCOPE_REALM.format(url=self.baseurl, realm=realm, id=clientid) + try: + self._request(client_role_scope_url, method='POST', data=json.dumps(payload)) + + except Exception as e: + self.fail_request(e, msg='Could not update roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) + + return self.get_client_role_scope_from_realm(clientid, realm) + + def delete_client_role_scope_from_realm(self, payload, clientid, realm="master"): + """ Delete the realm roles contains in the payload from the client's scope on the Keycloak server. + :param payload: List of realm roles to delete. + :param clientid: ID of the client to delete roles from scope. + :param realm: Realm from which to obtain the clients. + :return: The client realm roles scope. + """ + client_role_scope_url = URL_CLIENT_ROLE_SCOPE_REALM.format(url=self.baseurl, realm=realm, id=clientid) + try: + self._request(client_role_scope_url, method='DELETE', data=json.dumps(payload)) + + except Exception as e: + self.fail_request(e, msg='Could not delete roles scope for client %s in realm %s: %s' % (clientid, realm, str(e))) + + return self.get_client_role_scope_from_realm(clientid, realm) + + def fail_request(self, e, msg, **kwargs): + """ Triggers a module failure. 
This should be called + when an exception occurs during/after a request. + Attempts to parse the exception e as an HTTP error + and append it to msg. + + :param e: exception which triggered the failure + :param msg: error message to display to the user + :param kwargs: additional arguments to pass to module.fail_json + :return: None + """ + try: + if isinstance(e, HTTPError): + msg = "%s: %s" % (msg, to_native(e.read())) + except Exception: + pass + self.module.fail_json(msg, **kwargs) + + def fail_open_url(self, e, msg, **kwargs): + """ DEPRECATED: Use fail_request instead. + + Triggers a module failure. This should be called + when an exception occurs during/after a request. + Attempts to parse the exception e as an HTTP error + and append it to msg. + + :param e: exception which triggered the failure + :param msg: error message to display to the user + :param kwargs: additional arguments to pass to module.fail_json + :return: None + """ + return self.fail_request(e, msg, **kwargs) diff --git a/plugins/module_utils/identity/keycloak/keycloak_clientsecret.py b/plugins/module_utils/identity/keycloak/keycloak_clientsecret.py index 85caa8e16b..2118e8f6e2 100644 --- a/plugins/module_utils/identity/keycloak/keycloak_clientsecret.py +++ b/plugins/module_utils/identity/keycloak/keycloak_clientsecret.py @@ -1,12 +1,10 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # Copyright (c) 2022, John Cant # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type from ansible.module_utils.basic import AnsibleModule @@ -35,8 +33,8 @@ def keycloak_clientsecret_module(): argument_spec=argument_spec, supports_check_mode=True, required_one_of=([['id', 'client_id'], - ['token', 'auth_realm', 'auth_username', 'auth_password']]), - required_together=([['auth_realm', 'auth_username', 
'auth_password']]), + ['token', 'auth_realm', 'auth_username', 'auth_password', 'auth_client_id', 'auth_client_secret']]), + required_together=([['auth_username', 'auth_password']]), mutually_exclusive=[ ['token', 'auth_realm'], ['token', 'auth_username'], @@ -61,7 +59,7 @@ def keycloak_clientsecret_module_resolve_params(module, kc): client_id = module.params.get('client_id') # only lookup the client_id if id isn't provided. - # in the case that both are provided, prefer the ID, since it's one + # in the case that both are provided, prefer the ID, since it is one # less lookup. if id is None: # Due to the required_one_of spec, client_id is guaranteed to not be None @@ -69,7 +67,7 @@ def keycloak_clientsecret_module_resolve_params(module, kc): if client is None: module.fail_json( - msg='Client does not exist {client_id}'.format(client_id=client_id) + msg=f'Client does not exist {client_id}' ) id = client['id'] diff --git a/plugins/module_utils/ilo_redfish_utils.py b/plugins/module_utils/ilo_redfish_utils.py index a6ab42dba6..fd5b7fe64d 100644 --- a/plugins/module_utils/ilo_redfish_utils.py +++ b/plugins/module_utils/ilo_redfish_utils.py @@ -1,13 +1,12 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved. # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils +import time class iLORedfishUtils(RedfishUtils): @@ -21,20 +20,20 @@ class iLORedfishUtils(RedfishUtils): properties = ['Description', 'Id', 'Name', 'UserName'] # Changed self.sessions_uri to Hardcoded string. 
- response = self.get_request( - self.root_uri + self.service_root + "SessionService/Sessions/") + response = self.get_request(f"{self.root_uri}{self.service_root}SessionService/Sessions/") if not response['ret']: return response result['ret'] = True data = response['data'] + current_session = None if 'Oem' in data: if data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"]: current_session = data["Oem"]["Hpe"]["Links"]["MySession"]["@odata.id"] - for sessions in data[u'Members']: + for sessions in data['Members']: # session_list[] are URIs - session_list.append(sessions[u'@odata.id']) + session_list.append(sessions['@odata.id']) # for each session, get details for uri in session_list: session = {} @@ -83,7 +82,7 @@ class iLORedfishUtils(RedfishUtils): if not res_dhv6['ret']: return res_dhv6 - datetime_uri = self.manager_uri + "DateTime" + datetime_uri = f"{self.manager_uri}DateTime" listofips = mgr_attributes['mgr_attr_value'].split(" ") if len(listofips) > 2: @@ -102,12 +101,12 @@ class iLORedfishUtils(RedfishUtils): if not response1['ret']: return response1 - return {'ret': True, 'changed': True, 'msg': "Modified %s" % mgr_attributes['mgr_attr_name']} + return {'ret': True, 'changed': True, 'msg': f"Modified {mgr_attributes['mgr_attr_name']}"} def set_time_zone(self, attr): key = attr['mgr_attr_name'] - uri = self.manager_uri + "DateTime/" + uri = f"{self.manager_uri}DateTime/" response = self.get_request(self.root_uri + uri) if not response['ret']: return response @@ -115,7 +114,7 @@ class iLORedfishUtils(RedfishUtils): data = response["data"] if key not in data: - return {'ret': False, 'changed': False, 'msg': "Key %s not found" % key} + return {'ret': False, 'changed': False, 'msg': f"Key {key} not found"} timezones = data["TimeZoneList"] index = "" @@ -129,7 +128,7 @@ class iLORedfishUtils(RedfishUtils): if not response['ret']: return response - return {'ret': True, 'changed': True, 'msg': "Modified %s" % attr['mgr_attr_name']} + return {'ret': True, 
'changed': True, 'msg': f"Modified {attr['mgr_attr_name']}"} def set_dns_server(self, attr): key = attr['mgr_attr_name'] @@ -161,7 +160,7 @@ class iLORedfishUtils(RedfishUtils): if not response['ret']: return response - return {'ret': True, 'changed': True, 'msg': "Modified %s" % attr['mgr_attr_name']} + return {'ret': True, 'changed': True, 'msg': f"Modified {attr['mgr_attr_name']}"} def set_domain_name(self, attr): key = attr['mgr_attr_name'] @@ -206,7 +205,7 @@ class iLORedfishUtils(RedfishUtils): response = self.patch_request(self.root_uri + ethuri, payload) if not response['ret']: return response - return {'ret': True, 'changed': True, 'msg': "Modified %s" % attr['mgr_attr_name']} + return {'ret': True, 'changed': True, 'msg': f"Modified {attr['mgr_attr_name']}"} def set_wins_registration(self, mgrattr): Key = mgrattr['mgr_attr_name'] @@ -227,4 +226,80 @@ class iLORedfishUtils(RedfishUtils): response = self.patch_request(self.root_uri + ethuri, payload) if not response['ret']: return response - return {'ret': True, 'changed': True, 'msg': "Modified %s" % mgrattr['mgr_attr_name']} + return {'ret': True, 'changed': True, 'msg': f"Modified {mgrattr['mgr_attr_name']}"} + + def get_server_poststate(self): + # Get server details + response = self.get_request(self.root_uri + self.systems_uri) + if not response["ret"]: + return response + server_data = response["data"] + + if "Hpe" in server_data["Oem"]: + return { + "ret": True, + "server_poststate": server_data["Oem"]["Hpe"]["PostState"] + } + else: + return { + "ret": True, + "server_poststate": server_data["Oem"]["Hp"]["PostState"] + } + + def wait_for_ilo_reboot_completion(self, polling_interval=60, max_polling_time=1800): + # This method checks if OOB controller reboot is completed + time.sleep(10) + + # Check server poststate + state = self.get_server_poststate() + if not state["ret"]: + return state + + count = int(max_polling_time / polling_interval) + times = 0 + + # When server is powered OFF + pcount = 0 + 
while state["server_poststate"] in ["PowerOff", "Off"] and pcount < 5: + time.sleep(10) + state = self.get_server_poststate() + if not state["ret"]: + return state + + if state["server_poststate"] not in ["PowerOff", "Off"]: + break + pcount = pcount + 1 + if state["server_poststate"] in ["PowerOff", "Off"]: + return { + "ret": False, + "changed": False, + "msg": "Server is powered OFF" + } + + # When server is not rebooting + if state["server_poststate"] in ["InPostDiscoveryComplete", "FinishedPost"]: + return { + "ret": True, + "changed": False, + "msg": "Server is not rebooting" + } + + while state["server_poststate"] not in ["InPostDiscoveryComplete", "FinishedPost"] and count > times: + state = self.get_server_poststate() + if not state["ret"]: + return state + + if state["server_poststate"] in ["InPostDiscoveryComplete", "FinishedPost"]: + return { + "ret": True, + "changed": True, + "msg": "Server reboot is completed" + } + time.sleep(polling_interval) + times = times + 1 + + return { + "ret": False, + "changed": False, + "msg": f"Server Reboot has failed, server state: {state} " + } diff --git a/plugins/module_utils/influxdb.py b/plugins/module_utils/influxdb.py index 9a30e76428..9eed90cfda 100644 --- a/plugins/module_utils/influxdb.py +++ b/plugins/module_utils/influxdb.py @@ -1,11 +1,9 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017, Ansible Project # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import traceback @@ -15,7 +13,7 @@ from ansible_collections.community.general.plugins.module_utils.version import L REQUESTS_IMP_ERR = None try: - import requests.exceptions + import requests.exceptions # noqa: F401, pylint: disable=unused-import HAS_REQUESTS = True except ImportError: REQUESTS_IMP_ERR = traceback.format_exc() @@ -25,7 +23,7 @@ 
INFLUXDB_IMP_ERR = None try: from influxdb import InfluxDBClient from influxdb import __version__ as influxdb_version - from influxdb import exceptions + from influxdb import exceptions # noqa: F401, pylint: disable=unused-import HAS_INFLUXDB = True except ImportError: INFLUXDB_IMP_ERR = traceback.format_exc() diff --git a/plugins/module_utils/ipa.py b/plugins/module_utils/ipa.py index eda9b4132b..96010d503b 100644 --- a/plugins/module_utils/ipa.py +++ b/plugins/module_utils/ipa.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible @@ -10,8 +9,7 @@ # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import json import os @@ -20,10 +18,9 @@ import uuid import re from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text -from ansible.module_utils.six import PY3 -from ansible.module_utils.six.moves.urllib.parse import quote from ansible.module_utils.urls import fetch_url, HAS_GSSAPI from ansible.module_utils.basic import env_fallback, AnsibleFallbackNotFound +from urllib.parse import quote def _env_then_dns_fallback(*args, **kwargs): @@ -54,16 +51,16 @@ class IPAClient(object): self.use_gssapi = False def get_base_url(self): - return '%s://%s/ipa' % (self.protocol, self.host) + return f'{self.protocol}://{self.host}/ipa' def get_json_url(self): - return '%s/session/json' % self.get_base_url() + return f'{self.get_base_url()}/session/json' def login(self, username, password): if 'KRB5CCNAME' in os.environ and HAS_GSSAPI: self.use_gssapi = True elif 'KRB5_CLIENT_KTNAME' in os.environ and HAS_GSSAPI: - ccache = "MEMORY:" 
+ str(uuid.uuid4()) + ccache = f"MEMORY:{uuid.uuid4()!s}" os.environ['KRB5CCNAME'] = ccache self.use_gssapi = True else: @@ -74,8 +71,8 @@ class IPAClient(object): 'GSSAPI. To use GSSAPI, please set the ' 'KRB5_CLIENT_KTNAME or KRB5CCNAME (or both) ' ' environment variables.') - url = '%s/session/login_password' % self.get_base_url() - data = 'user=%s&password=%s' % (quote(username, safe=''), quote(password, safe='')) + url = f'{self.get_base_url()}/session/login_password' + data = f"user={quote(username, safe='')}&password={quote(password, safe='')}" headers = {'referer': self.get_base_url(), 'Content-Type': 'application/x-www-form-urlencoded', 'Accept': 'text/plain'} @@ -100,11 +97,11 @@ class IPAClient(object): err_string = e.get('message') else: err_string = e - self.module.fail_json(msg='%s: %s' % (msg, err_string)) + self.module.fail_json(msg=f'{msg}: {err_string}') def get_ipa_version(self): response = self.ping()['summary'] - ipa_ver_regex = re.compile(r'IPA server version (\d\.\d\.\d).*') + ipa_ver_regex = re.compile(r'IPA server version (\d+\.\d+\.\d+).*') version_match = ipa_ver_regex.match(response) ipa_version = None if version_match: @@ -117,7 +114,7 @@ class IPAClient(object): def _post_json(self, method, name, item=None): if item is None: item = {} - url = '%s/session/json' % self.get_base_url() + url = f'{self.get_base_url()}/session/json' data = dict(method=method) # TODO: We should probably handle this a little better. 
@@ -135,20 +132,13 @@ class IPAClient(object): if status_code not in [200, 201, 204]: self._fail(method, info['msg']) except Exception as e: - self._fail('post %s' % method, to_native(e)) + self._fail(f'post {method}', to_native(e)) - if PY3: - charset = resp.headers.get_content_charset('latin-1') - else: - response_charset = resp.headers.getparam('charset') - if response_charset: - charset = response_charset - else: - charset = 'latin-1' + charset = resp.headers.get_content_charset('latin-1') resp = json.loads(to_text(resp.read(), encoding=charset)) err = resp.get('error') if err is not None: - self._fail('response %s' % method, err) + self._fail(f'response {method}', err) if 'result' in resp: result = resp.get('result') diff --git a/plugins/module_utils/jenkins.py b/plugins/module_utils/jenkins.py index c742b364b7..26334f89b8 100644 --- a/plugins/module_utils/jenkins.py +++ b/plugins/module_utils/jenkins.py @@ -1,12 +1,10 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2022, Alexei Znamensky # # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import os diff --git a/plugins/module_utils/known_hosts.py b/plugins/module_utils/known_hosts.py index 25dd3e174e..ec20b8d88b 100644 --- a/plugins/module_utils/known_hosts.py +++ b/plugins/module_utils/known_hosts.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
# Modules you write using this snippet, which is embedded dynamically by Ansible @@ -10,14 +9,13 @@ # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os import hmac import re -from ansible.module_utils.six.moves.urllib.parse import urlparse +from urllib.parse import urlparse try: from hashlib import sha1 @@ -60,17 +58,14 @@ def get_fqdn_and_port(repo_url): elif "://" in repo_url: # this should be something we can parse with urlparse parts = urlparse(repo_url) - # parts[1] will be empty on python2.4 on ssh:// or git:// urls, so - # ensure we actually have a parts[1] before continuing. - if parts[1] != '': - fqdn = parts[1] - if "@" in fqdn: - fqdn = fqdn.split("@", 1)[1] - match = ipv6_re.match(fqdn) - if match: - fqdn, port = match.groups() - elif ":" in fqdn: - fqdn, port = fqdn.split(":")[0:2] + fqdn = parts[1] + if "@" in fqdn: + fqdn = fqdn.split("@", 1)[1] + match = ipv6_re.match(fqdn) + if match: + fqdn, port = match.groups() + elif ":" in fqdn: + fqdn, port = fqdn.split(":")[0:2] return fqdn, port @@ -103,13 +98,11 @@ def not_in_host_file(self, host): continue try: - host_fh = open(hf) + with open(hf) as host_fh: + data = host_fh.read() except IOError: hfiles_not_found += 1 continue - else: - data = host_fh.read() - host_fh.close() for line in data.split("\n"): if line is None or " " not in line: @@ -153,28 +146,28 @@ def add_host_key(module, fqdn, port=22, key_type="rsa", create_dir=False): try: os.makedirs(user_ssh_dir, int('700', 8)) except Exception: - module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir) + module.fail_json(msg=f"failed to create host key directory: {user_ssh_dir}") else: - module.fail_json(msg="%s does not exist" % user_ssh_dir) + module.fail_json(msg=f"{user_ssh_dir} does not exist") 
elif not os.path.isdir(user_ssh_dir): - module.fail_json(msg="%s is not a directory" % user_ssh_dir) + module.fail_json(msg=f"{user_ssh_dir} is not a directory") if port: - this_cmd = "%s -t %s -p %s %s" % (keyscan_cmd, key_type, port, fqdn) + this_cmd = f"{keyscan_cmd} -t {key_type} -p {port} {fqdn}" else: - this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn) + this_cmd = f"{keyscan_cmd} -t {key_type} {fqdn}" rc, out, err = module.run_command(this_cmd) # ssh-keyscan gives a 0 exit code and prints nothing on timeout if rc != 0 or not out: msg = 'failed to retrieve hostkey' if not out: - msg += '. "%s" returned no matches.' % this_cmd + msg += f'. "{this_cmd}" returned no matches.' else: - msg += ' using command "%s". [stdout]: %s' % (this_cmd, out) + msg += f' using command "{this_cmd}". [stdout]: {out}' if err: - msg += ' [stderr]: %s' % err + msg += f' [stderr]: {err}' module.fail_json(msg=msg) diff --git a/plugins/module_utils/ldap.py b/plugins/module_utils/ldap.py index 03acaa58c5..e0ee5940e2 100644 --- a/plugins/module_utils/ldap.py +++ b/plugins/module_utils/ldap.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2016, Peter Sagerson # Copyright (c) 2016, Jiri Tyr @@ -7,9 +6,9 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations +import re import traceback from ansible.module_utils.common.text.converters import to_native @@ -33,34 +32,49 @@ def gen_specs(**specs): specs.update({ 'bind_dn': dict(), 'bind_pw': dict(default='', no_log=True), + 'ca_path': dict(type='path'), 'dn': dict(required=True), 'referrals_chasing': dict(type='str', default='anonymous', choices=['disabled', 'anonymous']), 'server_uri': dict(default='ldapi:///'), 'start_tls': dict(default=False, type='bool'), 'validate_certs': dict(default=True, 
type='bool'), 'sasl_class': dict(choices=['external', 'gssapi'], default='external', type='str'), + 'xorder_discovery': dict(choices=['enable', 'auto', 'disable'], default='auto', type='str'), + 'client_cert': dict(default=None, type='path'), + 'client_key': dict(default=None, type='path'), }) return specs +def ldap_required_together(): + return [['client_cert', 'client_key']] + + class LdapGeneric(object): def __init__(self, module): # Shortcuts self.module = module self.bind_dn = self.module.params['bind_dn'] self.bind_pw = self.module.params['bind_pw'] + self.ca_path = self.module.params['ca_path'] self.referrals_chasing = self.module.params['referrals_chasing'] self.server_uri = self.module.params['server_uri'] self.start_tls = self.module.params['start_tls'] self.verify_cert = self.module.params['validate_certs'] self.sasl_class = self.module.params['sasl_class'] + self.xorder_discovery = self.module.params['xorder_discovery'] + self.client_cert = self.module.params['client_cert'] + self.client_key = self.module.params['client_key'] # Establish connection self.connection = self._connect_to_ldap() - # Try to find the X_ORDERed version of the DN - self.dn = self._find_dn() + if self.xorder_discovery == "enable" or (self.xorder_discovery == "auto" and not self._xorder_dn()): + # Try to find the X_ORDERed version of the DN + self.dn = self._find_dn() + else: + self.dn = self.module.params['dn'] def fail(self, msg, exn): self.module.fail_json( @@ -77,7 +91,7 @@ class LdapGeneric(object): if len(explode_dn) > 1: try: escaped_value = ldap.filter.escape_filter_chars(explode_dn[0]) - filterstr = "(%s)" % escaped_value + filterstr = f"({escaped_value})" dns = self.connection.search_s(','.join(explode_dn[1:]), ldap.SCOPE_ONELEVEL, filterstr) if len(dns) == 1: @@ -91,6 +105,13 @@ class LdapGeneric(object): if not self.verify_cert: ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER) + if self.ca_path: + ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, 
self.ca_path) + + if self.client_cert and self.client_key: + ldap.set_option(ldap.OPT_X_TLS_CERTFILE, self.client_cert) + ldap.set_option(ldap.OPT_X_TLS_KEYFILE, self.client_key) + connection = ldap.initialize(self.server_uri) if self.referrals_chasing == 'disabled': @@ -113,3 +134,10 @@ class LdapGeneric(object): self.fail("Cannot bind to the server.", e) return connection + + def _xorder_dn(self): + # match X_ORDERed DNs + regex = r".+\{\d+\}.+" + explode_dn = ldap.dn.explode_dn(self.module.params['dn']) + + return re.match(regex, explode_dn[0]) is not None diff --git a/plugins/module_utils/linode.py b/plugins/module_utils/linode.py index cedd3e0d5c..3700082bd8 100644 --- a/plugins/module_utils/linode.py +++ b/plugins/module_utils/linode.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible @@ -10,14 +9,11 @@ # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations + +from ansible.module_utils.ansible_release import __version__ as ansible_version def get_user_agent(module): """Retrieve a user-agent to send with LinodeClient requests.""" - try: - from ansible.module_utils.ansible_release import __version__ as ansible_version - except ImportError: - ansible_version = 'unknown' - return 'Ansible-%s/%s' % (module, ansible_version) + return f'Ansible-{module}/{ansible_version}' diff --git a/plugins/module_utils/locale_gen.py b/plugins/module_utils/locale_gen.py new file mode 100644 index 0000000000..b8a48d320b --- /dev/null +++ b/plugins/module_utils/locale_gen.py @@ -0,0 +1,29 @@ +# Copyright (c) 2023, Alexei Znamensky +# GNU General Public License 
v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +def locale_runner(module): + runner = CmdRunner( + module, + command=["locale", "-a"], + check_rc=True, + ) + return runner + + +def locale_gen_runner(module): + runner = CmdRunner( + module, + command="locale-gen", + arg_formats=dict( + name=cmd_runner_fmt.as_list(), + purge=cmd_runner_fmt.as_fixed('--purge'), + ), + check_rc=True, + ) + return runner diff --git a/plugins/module_utils/lxd.py b/plugins/module_utils/lxd.py index 007de4d8db..cc8e05c0f0 100644 --- a/plugins/module_utils/lxd.py +++ b/plugins/module_utils/lxd.py @@ -1,21 +1,19 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2016, Hiroaki Nakamura # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations +import http.client as http_client import os import socket import ssl import json +from urllib.parse import urlparse from ansible.module_utils.urls import generic_urlparse -from ansible.module_utils.six.moves.urllib.parse import urlparse -from ansible.module_utils.six.moves import http_client from ansible.module_utils.common.text.converters import to_text # httplib/http.client connection using unix domain socket @@ -41,7 +39,7 @@ class LXDClientException(Exception): class LXDClient(object): - def __init__(self, url, key_file=None, cert_file=None, debug=False): + def __init__(self, url, key_file=None, cert_file=None, debug=False, server_cert_file=None, server_check_hostname=True): """LXD Client. :param url: The URL of the LXD server. (e.g. 
unix:/var/lib/lxd/unix.socket or https://127.0.0.1) @@ -52,6 +50,10 @@ class LXDClient(object): :type cert_file: ``str`` :param debug: The debug flag. The request and response are stored in logs when debug is true. :type debug: ``bool`` + :param server_cert_file: The path of the server certificate file. + :type server_cert_file: ``str`` + :param server_check_hostname: Whether to check the server's hostname as part of TLS verification. + :type debug: ``bool`` """ self.url = url self.debug = debug @@ -60,7 +62,11 @@ class LXDClient(object): self.cert_file = cert_file self.key_file = key_file parts = generic_urlparse(urlparse(self.url)) - ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) + ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) + if server_cert_file: + # Check that the received cert is signed by the provided server_cert_file + ctx.load_verify_locations(cafile=server_cert_file) + ctx.check_hostname = server_check_hostname ctx.load_cert_chain(cert_file, keyfile=key_file) self.connection = HTTPSConnection(parts.get('netloc'), context=ctx) elif url.startswith('unix:'): @@ -72,7 +78,7 @@ class LXDClient(object): def do(self, method, url, body_json=None, ok_error_codes=None, timeout=None, wait_for_container=None): resp_json = self._send_request(method, url, body_json=body_json, ok_error_codes=ok_error_codes, timeout=timeout) if resp_json['type'] == 'async': - url = '{0}/wait'.format(resp_json['operation']) + url = f"{resp_json['operation']}/wait" resp_json = self._send_request('GET', url) if wait_for_container: while resp_json['metadata']['status'] == 'Running': diff --git a/plugins/module_utils/manageiq.py b/plugins/module_utils/manageiq.py index cbce05b8ec..477fc9a326 100644 --- a/plugins/module_utils/manageiq.py +++ b/plugins/module_utils/manageiq.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2017, Daniel Korn # @@ -11,8 +10,7 @@ # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or 
https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os @@ -95,12 +93,12 @@ class ManageIQ(object): ca_bundle_path = params['ca_cert'] self._module = module - self._api_url = url + '/api' + self._api_url = f"{url}/api" self._auth = dict(user=username, password=password, token=token) try: self._client = ManageIQClient(self._api_url, self._auth, verify_ssl=verify_ssl, ca_bundle_path=ca_bundle_path) except Exception as e: - self.module.fail_json(msg="failed to open connection (%s): %s" % (url, str(e))) + self.module.fail_json(msg=f"failed to open connection ({url}): {e}") @property def module(self): @@ -140,7 +138,7 @@ class ManageIQ(object): except ValueError: return None except Exception as e: - self.module.fail_json(msg="failed to find resource {error}".format(error=e)) + self.module.fail_json(msg=f"failed to find resource {e}") return vars(entity) def find_collection_resource_or_fail(self, collection_name, **params): @@ -153,8 +151,7 @@ class ManageIQ(object): if resource: return resource else: - msg = "{collection_name} where {params} does not exist in manageiq".format( - collection_name=collection_name, params=str(params)) + msg = f"{collection_name} where {params!s} does not exist in manageiq" self.module.fail_json(msg=msg) def policies(self, resource_id, resource_type, resource_name): @@ -176,8 +173,7 @@ class ManageIQ(object): if resource: return resource["id"] else: - msg = "{resource_name} {resource_type} does not exist in manageiq".format( - resource_name=resource_name, resource_type=resource_type) + msg = f"{resource_name} {resource_type} does not exist in manageiq" self.module.fail_json(msg=msg) @@ -195,10 +191,7 @@ class ManageIQPolicies(object): self.resource_type = resource_type self.resource_id = resource_id - self.resource_url = '{api_url}/{resource_type}/{resource_id}'.format( - 
api_url=self.api_url, - resource_type=resource_type, - resource_id=resource_id) + self.resource_url = f'{self.api_url}/{resource_type}/{resource_id}' def query_profile_href(self, profile): """ Add or Update the policy_profile href field @@ -217,9 +210,7 @@ class ManageIQPolicies(object): try: response = self.client.get(url.format(resource_url=self.resource_url)) except Exception as e: - msg = "Failed to query {resource_type} policies: {error}".format( - resource_type=self.resource_type, - error=e) + msg = f"Failed to query {self.resource_type} policies: {e}" self.module.fail_json(msg=msg) resources = response.get('resources', []) @@ -237,9 +228,7 @@ class ManageIQPolicies(object): try: response = self.client.get(url.format(api_url=self.api_url, profile_id=profile_id)) except Exception as e: - msg = "Failed to query {resource_type} policies: {error}".format( - resource_type=self.resource_type, - error=e) + msg = f"Failed to query {self.resource_type} policies: {e}" self.module.fail_json(msg=msg) resources = response.get('policies', []) @@ -297,7 +286,7 @@ class ManageIQPolicies(object): # make a list of assigned full profile names strings # e.g. ['openscap profile', ...] 
- assigned_profiles_set = set([profile['profile_name'] for profile in assigned_profiles]) + assigned_profiles_set = set(profile['profile_name'] for profile in assigned_profiles) for profile in profiles: assigned = profile.get('name') in assigned_profiles_set @@ -318,34 +307,26 @@ class ManageIQPolicies(object): if not profiles_to_post: return dict( changed=False, - msg="Profiles {profiles} already {action}ed, nothing to do".format( - action=action, - profiles=profiles)) + msg=f"Profiles {profiles} already {action}ed, nothing to do") # try to assign or unassign profiles to resource - url = '{resource_url}/policy_profiles'.format(resource_url=self.resource_url) + url = f'{self.resource_url}/policy_profiles' try: response = self.client.post(url, action=action, resources=profiles_to_post) except Exception as e: - msg = "Failed to {action} profile: {error}".format( - action=action, - error=e) + msg = f"Failed to {action} profile: {e}" self.module.fail_json(msg=msg) # check all entities in result to be successful for result in response['results']: if not result['success']: - msg = "Failed to {action}: {message}".format( - action=action, - message=result['message']) + msg = f"Failed to {action}: {result['message']}" self.module.fail_json(msg=msg) # successfully changed all needed profiles return dict( changed=True, - msg="Successfully {action}ed profiles: {profiles}".format( - action=action, - profiles=profiles)) + msg=f"Successfully {action}ed profiles: {profiles}") class ManageIQTags(object): @@ -362,17 +343,12 @@ class ManageIQTags(object): self.resource_type = resource_type self.resource_id = resource_id - self.resource_url = '{api_url}/{resource_type}/{resource_id}'.format( - api_url=self.api_url, - resource_type=resource_type, - resource_id=resource_id) + self.resource_url = f'{self.api_url}/{resource_type}/{resource_id}' def full_tag_name(self, tag): """ Returns the full tag name in manageiq """ - return '/managed/{tag_category}/{tag_name}'.format( - 
tag_category=tag['category'], - tag_name=tag['name']) + return f"/managed/{tag['category']}/{tag['name']}" def clean_tag_object(self, tag): """ Clean a tag object to have human readable form of: @@ -399,9 +375,7 @@ class ManageIQTags(object): try: response = self.client.get(url.format(resource_url=self.resource_url)) except Exception as e: - msg = "Failed to query {resource_type} tags: {error}".format( - resource_type=self.resource_type, - error=e) + msg = f"Failed to query {self.resource_type} tags: {e}" self.module.fail_json(msg=msg) resources = response.get('resources', []) @@ -424,7 +398,7 @@ class ManageIQTags(object): # make a list of assigned full tag names strings # e.g. ['/managed/environment/prod', ...] - assigned_tags_set = set([tag['full_name'] for tag in assigned_tags]) + assigned_tags_set = set(tag['full_name'] for tag in assigned_tags) for tag in tags: assigned = self.full_tag_name(tag) in assigned_tags_set @@ -444,27 +418,23 @@ class ManageIQTags(object): if not tags_to_post: return dict( changed=False, - msg="Tags already {action}ed, nothing to do".format(action=action)) + msg=f"Tags already {action}ed, nothing to do") # try to assign or unassign tags to resource - url = '{resource_url}/tags'.format(resource_url=self.resource_url) + url = f'{self.resource_url}/tags' try: response = self.client.post(url, action=action, resources=tags) except Exception as e: - msg = "Failed to {action} tag: {error}".format( - action=action, - error=e) + msg = f"Failed to {action} tag: {e}" self.module.fail_json(msg=msg) # check all entities in result to be successful for result in response['results']: if not result['success']: - msg = "Failed to {action}: {message}".format( - action=action, - message=result['message']) + msg = f"Failed to {action}: {result['message']}" self.module.fail_json(msg=msg) # successfully changed all needed tags return dict( changed=True, - msg="Successfully {action}ed tags".format(action=action)) + msg=f"Successfully {action}ed tags") diff 
--git a/plugins/module_utils/memset.py b/plugins/module_utils/memset.py index 671a8de308..cbfbc9108a 100644 --- a/plugins/module_utils/memset.py +++ b/plugins/module_utils/memset.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible @@ -10,12 +9,12 @@ # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import open_url, urllib_error +from urllib.parse import urlencode +from ansible.module_utils.urls import open_url from ansible.module_utils.basic import json +import urllib.error as urllib_error class Response(object): @@ -26,6 +25,7 @@ class Response(object): def __init__(self): self.content = None self.status_code = None + self.stderr = None def json(self): return json.loads(self.content) @@ -55,7 +55,7 @@ def memset_api_call(api_key, api_method, payload=None): data = urlencode(payload) headers = {'Content-Type': 'application/x-www-form-urlencoded'} api_uri_base = 'https://api.memset.com/v1/json/' - api_uri = '{0}{1}/' . format(api_uri_base, api_method) + api_uri = f'{api_uri_base}{api_method}/' try: resp = open_url(api_uri, data=data, headers=headers, method="POST", force_basic_auth=True, url_username=api_key) @@ -72,9 +72,13 @@ def memset_api_call(api_key, api_method, payload=None): response.status_code = errorcode if response.status_code is not None: - msg = "Memset API returned a {0} response ({1}, {2})." . 
format(response.status_code, response.json()['error_type'], response.json()['error']) + msg = f"Memset API returned a {response.status_code} response ({response.json()['error_type']}, {response.json()['error']})." else: - msg = "Memset API returned an error ({0}, {1})." . format(response.json()['error_type'], response.json()['error']) + msg = f"Memset API returned an error ({response.json()['error_type']}, {response.json()['error']})." + except urllib_error.URLError as e: + has_failed = True + msg = f"An URLError occurred ({type(e)})." + response.stderr = f"{e}" if msg is None: msg = response.json() diff --git a/plugins/module_utils/mh/base.py b/plugins/module_utils/mh/base.py index b10762eaba..688d65fc35 100644 --- a/plugins/module_utils/mh/base.py +++ b/plugins/module_utils/mh/base.py @@ -1,11 +1,9 @@ -# -*- coding: utf-8 -*- # (c) 2020, Alexei Znamensky # Copyright (c) 2020, Ansible Project # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException as _MHE @@ -16,7 +14,7 @@ class ModuleHelperBase(object): module = None ModuleHelperException = _MHE _delegated_to_module = ( - 'check_mode', 'get_bin_path', 'warn', 'deprecate', + 'check_mode', 'get_bin_path', 'warn', 'deprecate', 'debug', ) def __init__(self, module=None): @@ -42,7 +40,7 @@ class ModuleHelperBase(object): def __getattr__(self, attr): if attr in self._delegated_to_module: return getattr(self.module, attr) - raise AttributeError("ModuleHelperBase has no attribute '%s'" % (attr, )) + raise AttributeError(f"ModuleHelperBase has no attribute '{attr}'") def __init_module__(self): pass diff --git a/plugins/module_utils/mh/deco.py 
b/plugins/module_utils/mh/deco.py index 5138b212c7..0be576ccfa 100644 --- a/plugins/module_utils/mh/deco.py +++ b/plugins/module_utils/mh/deco.py @@ -1,11 +1,9 @@ -# -*- coding: utf-8 -*- # (c) 2020, Alexei Znamensky # Copyright (c) 2020, Ansible Project # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import traceback from functools import wraps @@ -13,23 +11,21 @@ from functools import wraps from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException -def cause_changes(on_success=None, on_failure=None): - +def cause_changes(when=None): def deco(func): - if on_success is None and on_failure is None: - return func - @wraps(func) - def wrapper(*args, **kwargs): + def wrapper(self, *args, **kwargs): try: - self = args[0] - func(*args, **kwargs) - if on_success is not None: - self.changed = on_success + func(self, *args, **kwargs) + if when == "success": + self.changed = True except Exception: - if on_failure is not None: - self.changed = on_failure + if when == "failure": + self.changed = True raise + finally: + if when == "always": + self.changed = True return wrapper @@ -41,17 +37,15 @@ def module_fails_on_exception(func): @wraps(func) def wrapper(self, *args, **kwargs): + def fix_key(k): + return k if k not in conflict_list else f"_{k}" + def fix_var_conflicts(output): - result = dict([ - (k if k not in conflict_list else "_" + k, v) - for k, v in output.items() - ]) + result = {fix_key(k): v for k, v in output.items()} return result try: func(self, *args, **kwargs) - except SystemExit: - raise except ModuleHelperException as e: if e.update_output: self.update_output(e.update_output) @@ -62,7 +56,7 @@ def module_fails_on_exception(func): except Exception as e: # patchy solution to resolve conflict with 
output variables output = fix_var_conflicts(self.output) - msg = "Module failed with exception: {0}".format(str(e).strip()) + msg = f"Module failed with exception: {str(e).strip()}" self.module.fail_json(msg=msg, exception=traceback.format_exc(), output=self.output, vars=self.vars.output(), **output) return wrapper @@ -73,6 +67,7 @@ def check_mode_skip(func): def wrapper(self, *args, **kwargs): if not self.module.check_mode: return func(self, *args, **kwargs) + return wrapper @@ -87,7 +82,7 @@ def check_mode_skip_returns(callable=None, value=None): return func(self, *args, **kwargs) return wrapper_callable - if value is not None: + else: @wraps(func) def wrapper_value(self, *args, **kwargs): if self.module.check_mode: @@ -95,7 +90,4 @@ def check_mode_skip_returns(callable=None, value=None): return func(self, *args, **kwargs) return wrapper_value - if callable is None and value is None: - return check_mode_skip - return deco diff --git a/plugins/module_utils/mh/exceptions.py b/plugins/module_utils/mh/exceptions.py index 68af5ba672..94bb7d7fff 100644 --- a/plugins/module_utils/mh/exceptions.py +++ b/plugins/module_utils/mh/exceptions.py @@ -1,18 +1,16 @@ -# -*- coding: utf-8 -*- # (c) 2020, Alexei Znamensky # Copyright (c) 2020, Ansible Project # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations from ansible.module_utils.common.text.converters import to_native class ModuleHelperException(Exception): def __init__(self, msg, update_output=None, *args, **kwargs): - self.msg = to_native(msg or "Module failed with exception: {0}".format(self)) + self.msg = to_native(msg or f"Module failed with exception: {self}") if update_output is None: update_output = {} self.update_output = update_output diff --git a/plugins/module_utils/mh/mixins/cmd.py 
b/plugins/module_utils/mh/mixins/cmd.py deleted file mode 100644 index a7d3793949..0000000000 --- a/plugins/module_utils/mh/mixins/cmd.py +++ /dev/null @@ -1,205 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2020, Alexei Znamensky -# Copyright (c) 2020, Ansible Project -# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) -# SPDX-License-Identifier: BSD-2-Clause - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -from functools import partial - - -class ArgFormat(object): - """ - Argument formatter for use as a command line parameter. Used in CmdMixin. - """ - BOOLEAN = 0 - PRINTF = 1 - FORMAT = 2 - BOOLEAN_NOT = 3 - - @staticmethod - def stars_deco(num): - if num == 1: - def deco(f): - return lambda v: f(*v) - return deco - elif num == 2: - def deco(f): - return lambda v: f(**v) - return deco - - return lambda f: f - - def __init__(self, name, fmt=None, style=FORMAT, stars=0): - """ - THIS CLASS IS BEING DEPRECATED. - It was never meant to be used outside the scope of CmdMixin, and CmdMixin is being deprecated. - See the deprecation notice in ``CmdMixin.__init__()`` below. - - Creates a CLI-formatter for one specific argument. The argument may be a module parameter or just a named parameter for - the CLI command execution. - :param name: Name of the argument to be formatted - :param fmt: Either a str to be formatted (using or not printf-style) or a callable that does that - :param style: Whether arg_format (as str) should use printf-style formatting. - Ignored if arg_format is None or not a str (should be callable). 
- :param stars: A int with 0, 1 or 2 value, indicating to formatting the value as: value, *value or **value - """ - def printf_fmt(_fmt, v): - try: - return [_fmt % v] - except TypeError as e: - if e.args[0] != 'not all arguments converted during string formatting': - raise - return [_fmt] - - _fmts = { - ArgFormat.BOOLEAN: lambda _fmt, v: ([_fmt] if bool(v) else []), - ArgFormat.BOOLEAN_NOT: lambda _fmt, v: ([] if bool(v) else [_fmt]), - ArgFormat.PRINTF: printf_fmt, - ArgFormat.FORMAT: lambda _fmt, v: [_fmt.format(v)], - } - - self.name = name - self.stars = stars - self.style = style - - if fmt is None: - fmt = "{0}" - style = ArgFormat.FORMAT - - if isinstance(fmt, str): - func = _fmts[style] - self.arg_format = partial(func, fmt) - elif isinstance(fmt, list) or isinstance(fmt, tuple): - self.arg_format = lambda v: [_fmts[style](f, v)[0] for f in fmt] - elif hasattr(fmt, '__call__'): - self.arg_format = fmt - else: - raise TypeError('Parameter fmt must be either: a string, a list/tuple of ' - 'strings or a function: type={0}, value={1}'.format(type(fmt), fmt)) - - if stars: - self.arg_format = (self.stars_deco(stars))(self.arg_format) - - def to_text(self, value): - if value is None and self.style != ArgFormat.BOOLEAN_NOT: - return [] - func = self.arg_format - return [str(p) for p in func(value)] - - -class CmdMixin(object): - """ - THIS CLASS IS BEING DEPRECATED. - See the deprecation notice in ``CmdMixin.__init__()`` below. - - Mixin for mapping module options to running a CLI command with its arguments. 
- """ - command = None - command_args_formats = {} - run_command_fixed_options = {} - check_rc = False - force_lang = "C" - - @property - def module_formats(self): - result = {} - for param in self.module.params.keys(): - result[param] = ArgFormat(param) - return result - - @property - def custom_formats(self): - result = {} - for param, fmt_spec in self.command_args_formats.items(): - result[param] = ArgFormat(param, **fmt_spec) - return result - - def __init__(self, *args, **kwargs): - super(CmdMixin, self).__init__(*args, **kwargs) - self.module.deprecate( - 'The CmdMixin used in classes CmdModuleHelper and CmdStateModuleHelper is being deprecated. ' - 'Modules should use community.general.plugins.module_utils.cmd_runner.CmdRunner instead.', - version='8.0.0', - collection_name='community.general', - ) - - def _calculate_args(self, extra_params=None, params=None): - def add_arg_formatted_param(_cmd_args, arg_format, _value): - args = list(arg_format.to_text(_value)) - return _cmd_args + args - - def find_format(_param): - return self.custom_formats.get(_param, self.module_formats.get(_param)) - - extra_params = extra_params or dict() - cmd_args = list([self.command]) if isinstance(self.command, str) else list(self.command) - try: - cmd_args[0] = self.module.get_bin_path(cmd_args[0], required=True) - except ValueError: - pass - param_list = params if params else self.vars.keys() - - for param in param_list: - if isinstance(param, dict): - if len(param) != 1: - self.do_raise("run_command parameter as a dict must contain only one key: {0}".format(param)) - _param = list(param.keys())[0] - fmt = find_format(_param) - value = param[_param] - elif isinstance(param, str): - if param in self.vars.keys(): - fmt = find_format(param) - value = self.vars[param] - elif param in extra_params: - fmt = find_format(param) - value = extra_params[param] - else: - self.do_raise('Cannot determine value for parameter: {0}'.format(param)) - else: - self.do_raise("run_command parameter 
must be either a str or a dict: {0}".format(param)) - cmd_args = add_arg_formatted_param(cmd_args, fmt, value) - - return cmd_args - - def process_command_output(self, rc, out, err): - return rc, out, err - - def run_command(self, - extra_params=None, - params=None, - process_output=None, - publish_rc=True, - publish_out=True, - publish_err=True, - publish_cmd=True, - *args, **kwargs): - cmd_args = self._calculate_args(extra_params, params) - options = dict(self.run_command_fixed_options) - options['check_rc'] = options.get('check_rc', self.check_rc) - options.update(kwargs) - env_update = dict(options.get('environ_update', {})) - if self.force_lang: - env_update.update({ - 'LANGUAGE': self.force_lang, - 'LC_ALL': self.force_lang, - }) - self.update_output(force_lang=self.force_lang) - options['environ_update'] = env_update - rc, out, err = self.module.run_command(cmd_args, *args, **options) - if publish_rc: - self.update_output(rc=rc) - if publish_out: - self.update_output(stdout=out) - if publish_err: - self.update_output(stderr=err) - if publish_cmd: - self.update_output(cmd_args=cmd_args) - if process_output is None: - _process = self.process_command_output - else: - _process = process_output - - return _process(rc, out, err) diff --git a/plugins/module_utils/mh/mixins/deprecate_attrs.py b/plugins/module_utils/mh/mixins/deprecate_attrs.py index c3bfb06c66..166e365782 100644 --- a/plugins/module_utils/mh/mixins/deprecate_attrs.py +++ b/plugins/module_utils/mh/mixins/deprecate_attrs.py @@ -1,11 +1,9 @@ -# -*- coding: utf-8 -*- # (c) 2020, Alexei Znamensky # Copyright (c) 2020, Ansible Project # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations from ansible.module_utils.basic import AnsibleModule @@ -17,7 +15,7 @@ class DeprecateAttrsMixin(object): if 
target is None: target = self if not hasattr(target, attr): - raise ValueError("Target {0} has no attribute {1}".format(target, attr)) + raise ValueError(f"Target {target} has no attribute {attr}") if module is None: if isinstance(target, AnsibleModule): module = target @@ -59,4 +57,4 @@ class DeprecateAttrsMixin(object): # override attribute prop = property(_getter) setattr(target, attr, prop) - setattr(target, "_{0}_setter".format(attr), prop.setter(_setter)) + setattr(target, f"_{attr}_setter", prop.setter(_setter)) diff --git a/plugins/module_utils/mh/mixins/deps.py b/plugins/module_utils/mh/mixins/deps.py deleted file mode 100644 index bab8c090bb..0000000000 --- a/plugins/module_utils/mh/mixins/deps.py +++ /dev/null @@ -1,59 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2020, Alexei Znamensky -# Copyright (c) 2020, Ansible Project -# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) -# SPDX-License-Identifier: BSD-2-Clause - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -import traceback - -from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase -from ansible_collections.community.general.plugins.module_utils.mh.deco import module_fails_on_exception - - -class DependencyCtxMgr(object): - def __init__(self, name, msg=None): - self.name = name - self.msg = msg - self.has_it = False - self.exc_type = None - self.exc_val = None - self.exc_tb = None - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.has_it = exc_type is None - self.exc_type = exc_type - self.exc_val = exc_val - self.exc_tb = exc_tb - return not self.has_it - - @property - def text(self): - return self.msg or str(self.exc_val) - - -class DependencyMixin(ModuleHelperBase): - _dependencies = [] - - @classmethod - def dependency(cls, name, msg): - cls._dependencies.append(DependencyCtxMgr(name, msg)) - return 
cls._dependencies[-1] - - def fail_on_missing_deps(self): - for d in self._dependencies: - if not d.has_it: - self.module.fail_json(changed=False, - exception="\n".join(traceback.format_exception(d.exc_type, d.exc_val, d.exc_tb)), - msg=d.text, - **self.output) - - @module_fails_on_exception - def run(self): - self.fail_on_missing_deps() - super(DependencyMixin, self).run() diff --git a/plugins/module_utils/mh/mixins/state.py b/plugins/module_utils/mh/mixins/state.py index 4e29379890..a04c3b1386 100644 --- a/plugins/module_utils/mh/mixins/state.py +++ b/plugins/module_utils/mh/mixins/state.py @@ -1,11 +1,9 @@ -# -*- coding: utf-8 -*- # (c) 2020, Alexei Znamensky # Copyright (c) 2020, Ansible Project # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations class StateMixin(object): @@ -17,7 +15,7 @@ class StateMixin(object): return self.default_state if state is None else state def _method(self, state): - return "{0}_{1}".format(self.state_param, state) + return f"{self.state_param}_{state}" def __run__(self): state = self._state() @@ -37,4 +35,4 @@ class StateMixin(object): return func() def __state_fallback__(self): - raise ValueError("Cannot find method: {0}".format(self._method(self._state()))) + raise ValueError(f"Cannot find method: {self._method(self._state())}") diff --git a/plugins/module_utils/mh/mixins/vars.py b/plugins/module_utils/mh/mixins/vars.py deleted file mode 100644 index 6dfb29bab8..0000000000 --- a/plugins/module_utils/mh/mixins/vars.py +++ /dev/null @@ -1,135 +0,0 @@ -# -*- coding: utf-8 -*- -# (c) 2020, Alexei Znamensky -# Copyright (c) 2020, Ansible Project -# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) -# SPDX-License-Identifier: BSD-2-Clause - -from __future__ import 
absolute_import, division, print_function -__metaclass__ = type - -import copy - - -class VarMeta(object): - NOTHING = object() - - def __init__(self, diff=False, output=True, change=None, fact=False): - self.init = False - self.initial_value = None - self.value = None - - self.diff = diff - self.change = diff if change is None else change - self.output = output - self.fact = fact - - def set(self, diff=None, output=None, change=None, fact=None, initial_value=NOTHING): - if diff is not None: - self.diff = diff - if output is not None: - self.output = output - if change is not None: - self.change = change - if fact is not None: - self.fact = fact - if initial_value is not self.NOTHING: - self.initial_value = copy.deepcopy(initial_value) - - def set_value(self, value): - if not self.init: - self.initial_value = copy.deepcopy(value) - self.init = True - self.value = value - return self - - @property - def has_changed(self): - return self.change and (self.initial_value != self.value) - - @property - def diff_result(self): - return None if not (self.diff and self.has_changed) else { - 'before': self.initial_value, - 'after': self.value, - } - - def __str__(self): - return "".format( - self.value, self.initial_value, self.diff, self.output, self.change - ) - - -class VarDict(object): - def __init__(self): - self._data = dict() - self._meta = dict() - - def __getitem__(self, item): - return self._data[item] - - def __setitem__(self, key, value): - self.set(key, value) - - def __getattr__(self, item): - try: - return self._data[item] - except KeyError: - return getattr(self._data, item) - - def __setattr__(self, key, value): - if key in ('_data', '_meta'): - super(VarDict, self).__setattr__(key, value) - else: - self.set(key, value) - - def meta(self, name): - return self._meta[name] - - def set_meta(self, name, **kwargs): - self.meta(name).set(**kwargs) - - def set(self, name, value, **kwargs): - if name in ('_data', '_meta'): - raise ValueError("Names _data and _meta are 
reserved for use by ModuleHelper") - self._data[name] = value - if name in self._meta: - meta = self.meta(name) - else: - meta = VarMeta(**kwargs) - meta.set_value(value) - self._meta[name] = meta - - def output(self): - return dict((k, v) for k, v in self._data.items() if self.meta(k).output) - - def diff(self): - diff_results = [(k, self.meta(k).diff_result) for k in self._data] - diff_results = [dr for dr in diff_results if dr[1] is not None] - if diff_results: - before = dict((dr[0], dr[1]['before']) for dr in diff_results) - after = dict((dr[0], dr[1]['after']) for dr in diff_results) - return {'before': before, 'after': after} - return None - - def facts(self): - facts_result = dict((k, v) for k, v in self._data.items() if self._meta[k].fact) - return facts_result if facts_result else None - - def change_vars(self): - return [v for v in self._data if self.meta(v).change] - - def has_changed(self, v): - return self._meta[v].has_changed - - -class VarsMixin(object): - - def __init__(self, module=None): - self.vars = VarDict() - super(VarsMixin, self).__init__(module) - - def update_vars(self, meta=None, **kwargs): - if meta is None: - meta = {} - for k, v in kwargs.items(): - self.vars.set(k, v, **meta) diff --git a/plugins/module_utils/mh/module_helper.py b/plugins/module_utils/mh/module_helper.py index 6813b5454b..fdce99045c 100644 --- a/plugins/module_utils/mh/module_helper.py +++ b/plugins/module_utils/mh/module_helper.py @@ -1,23 +1,20 @@ -# -*- coding: utf-8 -*- -# (c) 2020, Alexei Znamensky -# Copyright (c) 2020, Ansible Project +# (c) 2020-2024, Alexei Znamensky +# Copyright (c) 2020-2024, Ansible Project # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations + from ansible.module_utils.common.dict_transformations import dict_merge -from 
ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase, AnsibleModule -from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin +from ansible_collections.community.general.plugins.module_utils.vardict import VarDict +from ansible_collections.community.general.plugins.module_utils.mh.base import ModuleHelperBase from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin -from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyMixin -from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarsMixin from ansible_collections.community.general.plugins.module_utils.mh.mixins.deprecate_attrs import DeprecateAttrsMixin -class ModuleHelper(DeprecateAttrsMixin, VarsMixin, DependencyMixin, ModuleHelperBase): +class ModuleHelper(DeprecateAttrsMixin, ModuleHelperBase): facts_name = None output_params = () diff_params = () @@ -26,6 +23,8 @@ class ModuleHelper(DeprecateAttrsMixin, VarsMixin, DependencyMixin, ModuleHelper def __init__(self, module=None): super(ModuleHelper, self).__init__(module) + + self.vars = VarDict() for name, value in self.module.params.items(): self.vars.set( name, value, @@ -35,6 +34,12 @@ class ModuleHelper(DeprecateAttrsMixin, VarsMixin, DependencyMixin, ModuleHelper fact=name in self.facts_params, ) + def update_vars(self, meta=None, **kwargs): + if meta is None: + meta = {} + for k, v in kwargs.items(): + self.vars.set(k, v, **meta) + def update_output(self, **kwargs): self.update_vars(meta={"output": True}, **kwargs) @@ -42,7 +47,7 @@ class ModuleHelper(DeprecateAttrsMixin, VarsMixin, DependencyMixin, ModuleHelper self.update_vars(meta={"fact": True}, **kwargs) def _vars_changed(self): - return any(self.vars.has_changed(v) for v in self.vars.change_vars()) + return self.vars.has_changed def has_changed(self): return self.changed or self._vars_changed() @@ -64,19 +69,3 @@ 
class ModuleHelper(DeprecateAttrsMixin, VarsMixin, DependencyMixin, ModuleHelper class StateModuleHelper(StateMixin, ModuleHelper): pass - - -class CmdModuleHelper(CmdMixin, ModuleHelper): - """ - THIS CLASS IS BEING DEPRECATED. - See the deprecation notice in ``CmdMixin.__init__()``. - """ - pass - - -class CmdStateModuleHelper(CmdMixin, StateMixin, ModuleHelper): - """ - THIS CLASS IS BEING DEPRECATED. - See the deprecation notice in ``CmdMixin.__init__()``. - """ - pass diff --git a/plugins/module_utils/module_helper.py b/plugins/module_utils/module_helper.py index 4cda4175c7..f5c6275741 100644 --- a/plugins/module_utils/module_helper.py +++ b/plugins/module_utils/module_helper.py @@ -1,19 +1,16 @@ -# -*- coding: utf-8 -*- # (c) 2020, Alexei Znamensky # Copyright (c) 2020, Ansible Project # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations +# pylint: disable=unused-import from ansible_collections.community.general.plugins.module_utils.mh.module_helper import ( - ModuleHelper, StateModuleHelper, CmdModuleHelper, CmdStateModuleHelper, AnsibleModule + ModuleHelper, StateModuleHelper, +) +from ansible_collections.community.general.plugins.module_utils.mh.exceptions import ModuleHelperException # noqa: F401 +from ansible_collections.community.general.plugins.module_utils.mh.deco import ( + cause_changes, module_fails_on_exception, check_mode_skip, check_mode_skip_returns, ) -from ansible_collections.community.general.plugins.module_utils.mh.mixins.cmd import CmdMixin, ArgFormat -from ansible_collections.community.general.plugins.module_utils.mh.mixins.state import StateMixin -from ansible_collections.community.general.plugins.module_utils.mh.mixins.deps import DependencyCtxMgr -from ansible_collections.community.general.plugins.module_utils.mh.exceptions 
import ModuleHelperException -from ansible_collections.community.general.plugins.module_utils.mh.deco import cause_changes, module_fails_on_exception -from ansible_collections.community.general.plugins.module_utils.mh.mixins.vars import VarMeta, VarDict diff --git a/plugins/module_utils/net_tools/pritunl/api.py b/plugins/module_utils/net_tools/pritunl/api.py index cd2abc568e..7d6bd7fe86 100644 --- a/plugins/module_utils/net_tools/pritunl/api.py +++ b/plugins/module_utils/net_tools/pritunl/api.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2021, Florian Dambrine # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -7,7 +6,7 @@ Pritunl API that offers CRUD operations on Pritunl Organizations and Users """ -from __future__ import absolute_import, division, print_function +from __future__ import annotations import base64 import hashlib @@ -16,11 +15,8 @@ import json import time import uuid -from ansible.module_utils.six import iteritems from ansible.module_utils.urls import open_url -__metaclass__ = type - class PritunlException(Exception): pass @@ -66,7 +62,7 @@ def _delete_pritunl_organization( api_token=api_token, api_secret=api_secret, method="DELETE", - path="/organization/%s" % (organization_id), + path=f"/organization/{organization_id}", validate_certs=validate_certs, ) @@ -79,7 +75,7 @@ def _post_pritunl_organization( api_secret=api_secret, base_url=base_url, method="POST", - path="/organization/%s", + path="/organization", headers={"Content-Type": "application/json"}, data=json.dumps(organization_data), validate_certs=validate_certs, @@ -94,7 +90,7 @@ def _get_pritunl_users( api_secret=api_secret, base_url=base_url, method="GET", - path="/user/%s" % organization_id, + path=f"/user/{organization_id}", validate_certs=validate_certs, ) @@ -107,7 +103,7 @@ def _delete_pritunl_user( api_secret=api_secret, base_url=base_url, method="DELETE", - 
path="/user/%s/%s" % (organization_id, user_id), + path=f"/user/{organization_id}/{user_id}", validate_certs=validate_certs, ) @@ -120,7 +116,7 @@ def _post_pritunl_user( api_secret=api_secret, base_url=base_url, method="POST", - path="/user/%s" % organization_id, + path=f"/user/{organization_id}", headers={"Content-Type": "application/json"}, data=json.dumps(user_data), validate_certs=validate_certs, @@ -141,7 +137,7 @@ def _put_pritunl_user( api_secret=api_secret, base_url=base_url, method="PUT", - path="/user/%s/%s" % (organization_id, user_id), + path=f"/user/{organization_id}/{user_id}", headers={"Content-Type": "application/json"}, data=json.dumps(user_data), validate_certs=validate_certs, @@ -170,7 +166,7 @@ def list_pritunl_organizations( else: if not any( filter_val != org[filter_key] - for filter_key, filter_val in iteritems(filters) + for filter_key, filter_val in filters.items() ): orgs.append(org) @@ -201,7 +197,7 @@ def list_pritunl_users( else: if not any( filter_val != user[filter_key] - for filter_key, filter_val in iteritems(filters) + for filter_key, filter_val in filters.items() ): users.append(user) @@ -220,12 +216,12 @@ def post_pritunl_organization( api_secret=api_secret, base_url=base_url, organization_data={"name": organization_name}, - validate_certs=True, + validate_certs=validate_certs, ) if response.getcode() != 200: raise PritunlException( - "Could not add organization %s to Pritunl" % (organization_name) + f"Could not add organization {organization_name} to Pritunl" ) # The user PUT request returns the updated user object return json.loads(response.read()) @@ -248,13 +244,12 @@ def post_pritunl_user( base_url=base_url, organization_id=organization_id, user_data=user_data, - validate_certs=True, + validate_certs=validate_certs, ) if response.getcode() != 200: raise PritunlException( - "Could not remove user %s from organization %s from Pritunl" - % (user_id, organization_id) + f"Could not remove user {user_id} from organization 
{organization_id} from Pritunl" ) # user POST request returns an array of a single item, # so return this item instead of the list @@ -267,13 +262,12 @@ def post_pritunl_user( organization_id=organization_id, user_data=user_data, user_id=user_id, - validate_certs=True, + validate_certs=validate_certs, ) if response.getcode() != 200: raise PritunlException( - "Could not update user %s from organization %s from Pritunl" - % (user_id, organization_id) + f"Could not update user {user_id} from organization {organization_id} from Pritunl" ) # The user PUT request returns the updated user object return json.loads(response.read()) @@ -287,12 +281,12 @@ def delete_pritunl_organization( api_secret=api_secret, base_url=base_url, organization_id=organization_id, - validate_certs=True, + validate_certs=validate_certs, ) if response.getcode() != 200: raise PritunlException( - "Could not remove organization %s from Pritunl" % (organization_id) + f"Could not remove organization {organization_id} from Pritunl" ) return json.loads(response.read()) @@ -307,13 +301,12 @@ def delete_pritunl_user( base_url=base_url, organization_id=organization_id, user_id=user_id, - validate_certs=True, + validate_certs=validate_certs, ) if response.getcode() != 200: raise PritunlException( - "Could not remove user %s from organization %s from Pritunl" - % (user_id, organization_id) + f"Could not remove user {user_id} from organization {organization_id} from Pritunl" ) return json.loads(response.read()) @@ -331,14 +324,12 @@ def pritunl_auth_request( ): """ Send an API call to a Pritunl server. 
- Taken from https://pritunl.com/api and adaped work with Ansible open_url + Taken from https://pritunl.com/api and adapted to work with Ansible open_url """ auth_timestamp = str(int(time.time())) auth_nonce = uuid.uuid4().hex - auth_string = "&".join( - [api_token, auth_timestamp, auth_nonce, method.upper(), path] - ) + auth_string = f"{api_token}&{auth_timestamp}&{auth_nonce}&{method.upper()}&{path}" auth_signature = base64.b64encode( hmac.new( @@ -357,7 +348,7 @@ def pritunl_auth_request( auth_headers.update(headers) try: - uri = "%s%s" % (base_url, path) + uri = f"{base_url}{path}" return open_url( uri, diff --git a/plugins/module_utils/ocapi_utils.py b/plugins/module_utils/ocapi_utils.py index acc2ceae49..fd606d9bcc 100644 --- a/plugins/module_utils/ocapi_utils.py +++ b/plugins/module_utils/ocapi_utils.py @@ -1,21 +1,18 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2022 Western Digital Corporation # GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import json import os import uuid +from urllib.error import URLError, HTTPError +from urllib.parse import urlparse from ansible.module_utils.urls import open_url from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.common.text.converters import to_text -from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError -from ansible.module_utils.six.moves.urllib.parse import urlparse GET_HEADERS = {'accept': 'application/json'} @@ -56,19 +53,17 @@ class OcapiUtils(object): follow_redirects='all', use_proxy=True, timeout=self.timeout) data = json.loads(to_native(resp.read())) - headers = dict((k.lower(), v) for (k, v) in resp.info().items()) + headers = 
{k.lower(): v for (k, v) in resp.info().items()} except HTTPError as e: return {'ret': False, - 'msg': "HTTP Error %s on GET request to '%s'" - % (e.code, uri), + 'msg': f"HTTP Error {e.code} on GET request to '{uri}'", 'status': e.code} except URLError as e: - return {'ret': False, 'msg': "URL Error on GET request to '%s': '%s'" - % (uri, e.reason)} + return {'ret': False, 'msg': f"URL Error on GET request to '{uri}': '{e.reason}'"} # Almost all errors should be caught above, but just in case except Exception as e: return {'ret': False, - 'msg': "Failed GET request to '%s': '%s'" % (uri, to_text(e))} + 'msg': f"Failed GET request to '{uri}': '{e}'"} return {'ret': True, 'data': data, 'headers': headers} def delete_request(self, uri, etag=None): @@ -86,19 +81,17 @@ class OcapiUtils(object): data = json.loads(to_native(resp.read())) else: data = "" - headers = dict((k.lower(), v) for (k, v) in resp.info().items()) + headers = {k.lower(): v for (k, v) in resp.info().items()} except HTTPError as e: return {'ret': False, - 'msg': "HTTP Error %s on DELETE request to '%s'" - % (e.code, uri), + 'msg': f"HTTP Error {e.code} on DELETE request to '{uri}'", 'status': e.code} except URLError as e: - return {'ret': False, 'msg': "URL Error on DELETE request to '%s': '%s'" - % (uri, e.reason)} + return {'ret': False, 'msg': f"URL Error on DELETE request to '{uri}': '{e.reason}'"} # Almost all errors should be caught above, but just in case except Exception as e: return {'ret': False, - 'msg': "Failed DELETE request to '%s': '%s'" % (uri, to_text(e))} + 'msg': f"Failed DELETE request to '{uri}': '{e}'"} return {'ret': True, 'data': data, 'headers': headers} def put_request(self, uri, payload, etag=None): @@ -113,19 +106,17 @@ class OcapiUtils(object): force_basic_auth=basic_auth, validate_certs=False, follow_redirects='all', use_proxy=True, timeout=self.timeout) - headers = dict((k.lower(), v) for (k, v) in resp.info().items()) + headers = {k.lower(): v for (k, v) in 
resp.info().items()} except HTTPError as e: return {'ret': False, - 'msg': "HTTP Error %s on PUT request to '%s'" - % (e.code, uri), + 'msg': f"HTTP Error {e.code} on PUT request to '{uri}'", 'status': e.code} except URLError as e: - return {'ret': False, 'msg': "URL Error on PUT request to '%s': '%s'" - % (uri, e.reason)} + return {'ret': False, 'msg': f"URL Error on PUT request to '{uri}': '{e.reason}'"} # Almost all errors should be caught above, but just in case except Exception as e: return {'ret': False, - 'msg': "Failed PUT request to '%s': '%s'" % (uri, to_text(e))} + 'msg': f"Failed PUT request to '{uri}': '{e}'"} return {'ret': True, 'headers': headers, 'resp': resp} def post_request(self, uri, payload, content_type="application/json", timeout=None): @@ -144,19 +135,17 @@ class OcapiUtils(object): force_basic_auth=basic_auth, validate_certs=False, follow_redirects='all', use_proxy=True, timeout=self.timeout if timeout is None else timeout) - headers = dict((k.lower(), v) for (k, v) in resp.info().items()) + headers = {k.lower(): v for (k, v) in resp.info().items()} except HTTPError as e: return {'ret': False, - 'msg': "HTTP Error %s on POST request to '%s'" - % (e.code, uri), + 'msg': f"HTTP Error {e.code} on POST request to '{uri}'", 'status': e.code} except URLError as e: - return {'ret': False, 'msg': "URL Error on POST request to '%s': '%s'" - % (uri, e.reason)} + return {'ret': False, 'msg': f"URL Error on POST request to '{uri}': '{e.reason}'"} # Almost all errors should be caught above, but just in case except Exception as e: return {'ret': False, - 'msg': "Failed POST request to '%s': '%s'" % (uri, to_text(e))} + 'msg': f"Failed POST request to '{uri}': '{e}'"} return {'ret': True, 'headers': headers, 'resp': resp} def get_uri_with_slot_number_query_param(self, uri): @@ -168,7 +157,7 @@ class OcapiUtils(object): """ if self.proxy_slot_number is not None: parsed_url = urlparse(uri) - return parsed_url._replace(query="slotnumber=" + 
str(self.proxy_slot_number)).geturl() + return parsed_url._replace(query=f"slotnumber={self.proxy_slot_number}").geturl() else: return uri @@ -203,7 +192,7 @@ class OcapiUtils(object): elif command.startswith("PowerMode"): return self.manage_power_mode(command) else: - return {'ret': False, 'msg': 'Invalid command: ' + command} + return {'ret': False, 'msg': f"Invalid command: {command}"} return {'ret': True} @@ -242,7 +231,7 @@ class OcapiUtils(object): return response data = response['data'] if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} + return {'ret': False, 'msg': f"Key {key} not found"} if 'ID' not in data[key]: return {'ret': False, 'msg': 'IndicatorLED for resource has no ID.'} @@ -285,7 +274,7 @@ class OcapiUtils(object): return response data = response['data'] if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} + return {'ret': False, 'msg': f"Key {key} not found"} if 'ID' not in data[key]: return {'ret': False, 'msg': 'PowerState for resource has no ID.'} @@ -307,7 +296,7 @@ class OcapiUtils(object): if response['ret'] is False: return response else: - return {'ret': False, 'msg': 'Invalid command: ' + command} + return {'ret': False, 'msg': f"Invalid command: {command}"} return {'ret': True} @@ -324,14 +313,14 @@ class OcapiUtils(object): this method sends the file as binary. 
""" boundary = str(uuid.uuid4()) # Generate a random boundary - body = "--" + boundary + '\r\n' - body += 'Content-Disposition: form-data; name="FirmwareFile"; filename="%s"\r\n' % to_native(os.path.basename(filename)) + body = f"--{boundary}\r\n" + body += f'Content-Disposition: form-data; name="FirmwareFile"; filename="{to_native(os.path.basename(filename))}"\r\n' body += 'Content-Type: application/octet-stream\r\n\r\n' body_bytes = bytearray(body, 'utf-8') with open(filename, 'rb') as f: body_bytes += f.read() - body_bytes += bytearray("\r\n--%s--" % boundary, 'utf-8') - return ("multipart/form-data; boundary=%s" % boundary, + body_bytes += bytearray(f"\r\n--{boundary}--", 'utf-8') + return (f"multipart/form-data; boundary={boundary}", body_bytes) def upload_firmware_image(self, update_image_path): @@ -341,7 +330,7 @@ class OcapiUtils(object): """ if not (os.path.exists(update_image_path) and os.path.isfile(update_image_path)): return {'ret': False, 'msg': 'File does not exist.'} - url = self.root_uri + "OperatingSystem" + url = f"{self.root_uri}OperatingSystem" url = self.get_uri_with_slot_number_query_param(url) content_type, b_form_data = self.prepare_multipart_firmware_upload(update_image_path) @@ -432,7 +421,7 @@ class OcapiUtils(object): else: return response details = response["data"]["Status"].get("Details") - if type(details) is str: + if isinstance(details, str): details = [details] health_list = response["data"]["Status"]["Health"] return_value = { diff --git a/plugins/module_utils/oneandone.py b/plugins/module_utils/oneandone.py index bbad2eaa05..1c9cb73d73 100644 --- a/plugins/module_utils/oneandone.py +++ b/plugins/module_utils/oneandone.py @@ -1,10 +1,8 @@ -# -*- coding: utf-8 -*- # Copyright (c) Ansible project # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type 
+from __future__ import annotations import time @@ -211,7 +209,7 @@ def wait_for_resource_creation_completion(oneandone_conn, (resource_type != OneAndOneResources.server and resource_state.lower() == 'active')): return elif resource_state.lower() == 'failed': - raise Exception('%s creation failed for %s' % (resource_type, resource_id)) + raise Exception(f'{resource_type} creation failed for {resource_id}') elif resource_state.lower() in ('active', 'enabled', 'deploying', @@ -219,10 +217,10 @@ def wait_for_resource_creation_completion(oneandone_conn, continue else: raise Exception( - 'Unknown %s state %s' % (resource_type, resource_state)) + f'Unknown {resource_type} state {resource_state}') raise Exception( - 'Timed out waiting for %s completion for %s' % (resource_type, resource_id)) + f'Timed out waiting for {resource_type} completion for {resource_id}') def wait_for_resource_deletion_completion(oneandone_conn, @@ -248,7 +246,7 @@ def wait_for_resource_deletion_completion(oneandone_conn, _type = 'PRIVATENETWORK' else: raise Exception( - 'Unsupported wait_for delete operation for %s resource' % resource_type) + f'Unsupported wait_for delete operation for {resource_type} resource') for log in logs: if (log['resource']['id'] == resource_id and @@ -257,4 +255,4 @@ def wait_for_resource_deletion_completion(oneandone_conn, log['status']['state'] == 'OK'): return raise Exception( - 'Timed out waiting for %s deletion for %s' % (resource_type, resource_id)) + f'Timed out waiting for {resource_type} deletion for {resource_id}') diff --git a/plugins/module_utils/onepassword.py b/plugins/module_utils/onepassword.py index 3023165b1a..5e52a9af41 100644 --- a/plugins/module_utils/onepassword.py +++ b/plugins/module_utils/onepassword.py @@ -1,10 +1,8 @@ -# -*- coding: utf-8 -*- # Copyright (c) Ansible project # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import 
(absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import os diff --git a/plugins/module_utils/oneview.py b/plugins/module_utils/oneview.py index dfd00c514e..1f57355f58 100644 --- a/plugins/module_utils/oneview.py +++ b/plugins/module_utils/oneview.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible @@ -10,14 +9,13 @@ # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import abc import collections import json -import os import traceback +from collections.abc import Mapping HPE_ONEVIEW_IMP_ERR = None try: @@ -27,10 +25,8 @@ except ImportError: HPE_ONEVIEW_IMP_ERR = traceback.format_exc() HAS_HPE_ONEVIEW = False -from ansible.module_utils import six from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.common._collections_compat import Mapping def transform_list_to_dict(list_): @@ -131,7 +127,7 @@ class OneViewModuleException(Exception): self.msg = None self.oneview_response = None - if isinstance(data, six.string_types): + if isinstance(data, str): self.msg = data else: self.oneview_response = data @@ -181,8 +177,7 @@ class OneViewModuleResourceNotFound(OneViewModuleException): pass -@six.add_metaclass(abc.ABCMeta) -class OneViewModuleBase(object): +class OneViewModuleBase(object, metaclass=abc.ABCMeta): MSG_CREATED = 'Resource created successfully.' MSG_UPDATED = 'Resource updated successfully.' MSG_DELETED = 'Resource deleted successfully.' 
@@ -400,11 +395,11 @@ class OneViewModuleBase(object): resource1 = first_resource resource2 = second_resource - debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2) + debug_resources = f"resource1 = {resource1}, resource2 = {resource2}" # The first resource is True / Not Null and the second resource is False / Null if resource1 and not resource2: - self.module.log("resource1 and not resource2. " + debug_resources) + self.module.log(f"resource1 and not resource2. {debug_resources}") return False # Checks all keys in first dict against the second dict @@ -454,15 +449,15 @@ class OneViewModuleBase(object): resource1 = first_resource resource2 = second_resource - debug_resources = "resource1 = {0}, resource2 = {1}".format(resource1, resource2) + debug_resources = f"resource1 = {resource1}, resource2 = {resource2}" # The second list is null / empty / False if not resource2: - self.module.log("resource 2 is null. " + debug_resources) + self.module.log(f"resource 2 is null. {debug_resources}") return False if len(resource1) != len(resource2): - self.module.log("resources have different length. " + debug_resources) + self.module.log(f"resources have different length. {debug_resources}") return False resource1 = sorted(resource1, key=_str_sorted) @@ -472,15 +467,15 @@ class OneViewModuleBase(object): if isinstance(val, Mapping): # change comparison function to compare dictionaries if not self.compare(val, resource2[i]): - self.module.log("resources are different. " + debug_resources) + self.module.log(f"resources are different. {debug_resources}") return False elif isinstance(val, list): # recursive call if not self.compare_list(val, resource2[i]): - self.module.log("lists are different. " + debug_resources) + self.module.log(f"lists are different. {debug_resources}") return False elif _standardize_value(val) != _standardize_value(resource2[i]): - self.module.log("values are different. 
" + debug_resources) + self.module.log(f"values are different. {debug_resources}") return False # no differences found diff --git a/plugins/module_utils/online.py b/plugins/module_utils/online.py index a2f6e77a03..303abffab2 100644 --- a/plugins/module_utils/online.py +++ b/plugins/module_utils/online.py @@ -1,10 +1,8 @@ -# -*- coding: utf-8 -*- # Copyright (c) Ansible project # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import json import sys diff --git a/plugins/module_utils/opennebula.py b/plugins/module_utils/opennebula.py index 0fe649ba5c..ce9ec76b0d 100644 --- a/plugins/module_utils/opennebula.py +++ b/plugins/module_utils/opennebula.py @@ -1,21 +1,19 @@ -# -*- coding: utf-8 -*- # # Copyright 2018 www.privaz.io Valletech AB # # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import time import ssl from os import environ -from ansible.module_utils.six import string_types from ansible.module_utils.basic import AnsibleModule +IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS'] HAS_PYONE = True try: @@ -45,14 +43,19 @@ def render(to_render): """Converts dictionary to OpenNebula template.""" def recurse(to_render): for key, value in sorted(to_render.items()): + if value is None: + continue if isinstance(value, dict): - yield '{0:}=[{1:}]'.format(key, ','.join(recurse(value))) + yield f"{key}=[{','.join(recurse(value))}]" continue if isinstance(value, list): for item in value: - yield '{0:}=[{1:}]'.format(key, ','.join(recurse(item))) + 
yield f"{key}=[{','.join(recurse(item))}]" continue - yield '{0:}="{1:}"'.format(key, value) + if isinstance(value, str): + yield '{0:}="{1:}"'.format(key, value.replace('\\', '\\\\').replace('"', '\\"')) + continue + yield f'{key}="{value}"' return '\n'.join(recurse(to_render)) @@ -121,7 +124,7 @@ class OpenNebulaModule: else: self.fail("Either api_password or the environment variable ONE_PASSWORD must be provided") - session = "%s:%s" % (username, password) + session = f"{username}:{password}" if not self.module.params.get("validate_certs") and "PYTHONHTTPSVERIFY" not in environ: return OneServer(url, session=session, context=no_ssl_validation_context) @@ -259,7 +262,7 @@ class OpenNebulaModule: self.cast_template(template[key]) elif isinstance(value, list): template[key] = ', '.join(value) - elif not isinstance(value, string_types): + elif not isinstance(value, str): template[key] = str(value) def requires_template_update(self, current, desired): @@ -309,11 +312,11 @@ class OpenNebulaModule: current_state = state() if current_state in invalid_states: - self.fail('invalid %s state %s' % (element_name, state_name(current_state))) + self.fail(f'invalid {element_name} state {state_name(current_state)}') if transition_states: if current_state not in transition_states: - self.fail('invalid %s transition state %s' % (element_name, state_name(current_state))) + self.fail(f'invalid {element_name} transition state {state_name(current_state)}') if current_state in target_states: return True @@ -331,7 +334,7 @@ class OpenNebulaModule: try: self.run(self.one, self.module, self.result) except OneException as e: - self.fail(msg="OpenNebula Exception: %s" % e) + self.fail(msg=f"OpenNebula Exception: {e}") def run(self, one, module, result): """ @@ -342,3 +345,90 @@ class OpenNebulaModule: result: the Ansible result """ raise NotImplementedError("Method requires implementation") + + def get_image_list_id(self, image, element): + """ + This is a helper function for get_image_info 
to iterate over a simple list of objects + """ + list_of_id = [] + + if element == 'VMS': + image_list = image.VMS + if element == 'CLONES': + image_list = image.CLONES + if element == 'APP_CLONES': + image_list = image.APP_CLONES + + for iter in image_list.ID: + list_of_id.append( + # These are optional so firstly check for presence + getattr(iter, 'ID', 'Null'), + ) + return list_of_id + + def get_image_snapshots_list(self, image): + """ + This is a helper function for get_image_info to iterate over a dictionary + """ + list_of_snapshots = [] + + for iter in image.SNAPSHOTS.SNAPSHOT: + list_of_snapshots.append({ + 'date': iter['DATE'], + 'parent': iter['PARENT'], + 'size': iter['SIZE'], + # These are optional so firstly check for presence + 'allow_orhans': getattr(image.SNAPSHOTS, 'ALLOW_ORPHANS', 'Null'), + 'children': getattr(iter, 'CHILDREN', 'Null'), + 'active': getattr(iter, 'ACTIVE', 'Null'), + 'name': getattr(iter, 'NAME', 'Null'), + }) + return list_of_snapshots + + def get_image_info(self, image): + """ + This method is used by one_image and one_image_info modules to retrieve + information from XSD scheme of an image + Returns: a copy of the parameters that includes the resolved parameters. 
+ """ + info = { + 'id': image.ID, + 'name': image.NAME, + 'state': IMAGE_STATES[image.STATE], + 'running_vms': image.RUNNING_VMS, + 'used': bool(image.RUNNING_VMS), + 'user_name': image.UNAME, + 'user_id': image.UID, + 'group_name': image.GNAME, + 'group_id': image.GID, + 'permissions': { + 'owner_u': image.PERMISSIONS.OWNER_U, + 'owner_m': image.PERMISSIONS.OWNER_M, + 'owner_a': image.PERMISSIONS.OWNER_A, + 'group_u': image.PERMISSIONS.GROUP_U, + 'group_m': image.PERMISSIONS.GROUP_M, + 'group_a': image.PERMISSIONS.GROUP_A, + 'other_u': image.PERMISSIONS.OTHER_U, + 'other_m': image.PERMISSIONS.OTHER_M, + 'other_a': image.PERMISSIONS.OTHER_A + }, + 'type': image.TYPE, + 'disk_type': image.DISK_TYPE, + 'persistent': image.PERSISTENT, + 'regtime': image.REGTIME, + 'source': image.SOURCE, + 'path': image.PATH, + 'fstype': getattr(image, 'FSTYPE', 'Null'), + 'size': image.SIZE, + 'cloning_ops': image.CLONING_OPS, + 'cloning_id': image.CLONING_ID, + 'target_snapshot': image.TARGET_SNAPSHOT, + 'datastore_id': image.DATASTORE_ID, + 'datastore': image.DATASTORE, + 'vms': self.get_image_list_id(image, 'VMS'), + 'clones': self.get_image_list_id(image, 'CLONES'), + 'app_clones': self.get_image_list_id(image, 'APP_CLONES'), + 'snapshots': self.get_image_snapshots_list(image), + 'template': image.TEMPLATE, + } + return info diff --git a/plugins/module_utils/oracle/oci_utils.py b/plugins/module_utils/oracle/oci_utils.py index 76fb45324b..0910d24cae 100644 --- a/plugins/module_utils/oracle/oci_utils.py +++ b/plugins/module_utils/oracle/oci_utils.py @@ -1,22 +1,27 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017, 2018, 2019 Oracle and/or its affiliates. 
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations + +# +# DEPRECATED +# +# This module utils is deprecated and will be removed in community.general 13.0.0 +# import logging import logging.config import os import tempfile -from datetime import datetime +# (TODO: remove next line!) +from datetime import datetime # noqa: F401, pylint: disable=unused-import from operator import eq import time try: - import yaml + import yaml # noqa: F401, pylint: disable=unused-import import oci from oci.constants import HEADER_NEXT_PAGE @@ -41,7 +46,6 @@ except ImportError: from ansible.module_utils.common.text.converters import to_bytes -from ansible.module_utils.six import iteritems __version__ = "1.6.0-dev" @@ -433,7 +437,7 @@ def check_and_update_attributes( target_instance, attr_name, input_value, existing_value, changed ): """ - This function checks the difference between two resource attributes of literal types and sets the attrbute + This function checks the difference between two resource attributes of literal types and sets the attribute value in the target instance type holding the attribute. :param target_instance: The instance which contains the attribute whose values to be compared :param attr_name: Name of the attribute whose value required to be compared @@ -560,7 +564,7 @@ def are_lists_equal(s, t): if s is None and t is None: return True - if (s is None and len(t) >= 0) or (t is None and len(s) >= 0) or (len(s) != len(t)): + if s is None or t is None or (len(s) != len(t)): return False if len(s) == 0: @@ -569,7 +573,7 @@ def are_lists_equal(s, t): s = to_dict(s) t = to_dict(t) - if type(s[0]) == dict: + if isinstance(s[0], dict): # Handle list of dicts. Dictionary returned by the API may have additional keys. 
For example, a get call on # service gateway has an attribute `services` which is a list of `ServiceIdResponseDetails`. This has a key # `service_name` which is not provided in the list of `services` by a user while making an update call; only @@ -603,9 +607,9 @@ def get_attr_to_update(get_fn, kwargs_get, module, update_attributes): user_provided_attr_value = module.params.get(attr, None) unequal_list_attr = ( - type(resources_attr_value) == list or type(user_provided_attr_value) == list + isinstance(resources_attr_value, list) or isinstance(user_provided_attr_value, list) ) and not are_lists_equal(user_provided_attr_value, resources_attr_value) - unequal_attr = type(resources_attr_value) != list and to_dict( + unequal_attr = not isinstance(resources_attr_value, list) and to_dict( resources_attr_value ) != to_dict(user_provided_attr_value) if unequal_list_attr or unequal_attr: @@ -784,7 +788,7 @@ def _get_attributes_to_consider(exclude_attributes, model, module): attributes_to_consider = list(model.attribute_map) if "freeform_tags" in attributes_to_consider: attributes_to_consider.remove("freeform_tags") - # Temporarily removing node_count as the exisiting resource does not reflect it + # Temporarily removing node_count as the existing resource does not reflect it if "node_count" in attributes_to_consider: attributes_to_consider.remove("node_count") _debug("attributes to consider: {0}".format(attributes_to_consider)) @@ -815,7 +819,7 @@ def is_attr_assigned_default(default_attribute_values, attr, assigned_value): # this is to ensure forward compatibility when the API returns new keys that are not known during # the time when the module author provided default values for the attribute keys = {} - for k, v in iteritems(assigned_value.items()): + for k, v in assigned_value.items().items(): if k in default_val_for_attr: keys[k] = v @@ -935,9 +939,9 @@ def tuplize(d): list_of_tuples = [] key_list = sorted(list(d.keys())) for key in key_list: - if type(d[key]) == list: + 
if isinstance(d[key], list): # Convert a value which is itself a list of dict to a list of tuples. - if d[key] and type(d[key][0]) == dict: + if d[key] and isinstance(d[key][0], dict): sub_tuples = [] for sub_dict in d[key]: sub_tuples.append(tuplize(sub_dict)) @@ -947,7 +951,7 @@ def tuplize(d): list_of_tuples.append((sub_tuples is None, key, sub_tuples)) else: list_of_tuples.append((d[key] is None, key, d[key])) - elif type(d[key]) == dict: + elif isinstance(d[key], dict): tupled_value = tuplize(d[key]) list_of_tuples.append((tupled_value is None, key, tupled_value)) else: @@ -968,13 +972,13 @@ def sort_dictionary(d): """ sorted_d = {} for key in d: - if type(d[key]) == list: - if d[key] and type(d[key][0]) == dict: + if isinstance(d[key], list): + if d[key] and isinstance(d[key][0], dict): sorted_value = sort_list_of_dictionary(d[key]) sorted_d[key] = sorted_value else: sorted_d[key] = sorted(d[key]) - elif type(d[key]) == dict: + elif isinstance(d[key], dict): sorted_d[key] = sort_dictionary(d[key]) else: sorted_d[key] = d[key] @@ -1025,10 +1029,7 @@ def check_if_user_value_matches_resources_attr( return if ( - resources_value_for_attr is None - and len(user_provided_value_for_attr) >= 0 - or user_provided_value_for_attr is None - and len(resources_value_for_attr) >= 0 + resources_value_for_attr is None or user_provided_value_for_attr is None ): res[0] = False return @@ -1043,7 +1044,7 @@ def check_if_user_value_matches_resources_attr( if ( user_provided_value_for_attr - and type(user_provided_value_for_attr[0]) == dict + and isinstance(user_provided_value_for_attr[0], dict) ): # Process a list of dict sorted_user_provided_value_for_attr = sort_list_of_dictionary( @@ -1531,7 +1532,7 @@ def delete_and_wait( result[resource_type] = resource return result # oci.wait_until() returns an instance of oci.util.Sentinel in case the resource is not found. 
- if type(wait_response) is not Sentinel: + if not isinstance(wait_response, Sentinel): resource = to_dict(wait_response.data) else: resource["lifecycle_state"] = "DELETED" @@ -1546,7 +1547,7 @@ def delete_and_wait( except ServiceError as ex: # DNS API throws a 400 InvalidParameter when a zone id is provided for zone_name_or_id and if the zone # resource is not available, instead of the expected 404. So working around this for now. - if type(client) == oci.dns.DnsClient: + if isinstance(client, oci.dns.DnsClient): if ex.status == 400 and ex.code == "InvalidParameter": _debug( "Resource {0} with {1} already deleted. So returning changed=False".format( @@ -1773,7 +1774,7 @@ def update_class_type_attr_difference( ): """ Checks the difference and updates an attribute which is represented by a class - instance. Not aplicable if the attribute type is a primitive value. + instance. Not applicable if the attribute type is a primitive value. For example, if a class name is A with an attribute x, then if A.x = X(), then only this method works. :param update_class_details The instance which should be updated if there is change in @@ -1935,7 +1936,7 @@ def get_target_resource_from_list( module, list_resource_fn, target_resource_id=None, **kwargs ): """ - Returns a resource filtered by identifer from a list of resources. This method should be + Returns a resource filtered by identifier from a list of resources. This method should be used as an alternative of 'get resource' method when 'get resource' is nor provided by resource api. This method returns a wrapper of response object but that should not be used as an input to 'wait_until' utility as this is only a partial wrapper of response object. 
diff --git a/plugins/module_utils/pacemaker.py b/plugins/module_utils/pacemaker.py new file mode 100644 index 0000000000..355fd55cc2 --- /dev/null +++ b/plugins/module_utils/pacemaker.py @@ -0,0 +1,79 @@ +# Copyright (c) 2025, Dexter Le +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +import re + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + +_state_map = { + "present": "create", + "absent": "remove", + "cloned": "clone", + "status": "status", + "enabled": "enable", + "disabled": "disable", + "online": "start", + "offline": "stop", + "maintenance": "set", + "config": "config", + "cleanup": "cleanup", +} + + +def fmt_resource_type(value): + return [":".join(value[k] for k in ['resource_standard', 'resource_provider', 'resource_name'] if value.get(k) is not None)] + + +def fmt_resource_operation(value): + cmd = [] + for op in value: + cmd.append("op") + cmd.append(op.get('operation_action')) + for operation_option in op.get('operation_option'): + cmd.append(operation_option) + + return cmd + + +def fmt_resource_argument(value): + return ['--group' if value['argument_action'] == 'group' else value['argument_action']] + value['argument_option'] + + +def get_pacemaker_maintenance_mode(runner): + with runner("cli_action config") as ctx: + rc, out, err = ctx.run(cli_action="property") + maint_mode_re = re.compile(r"maintenance-mode.*true", re.IGNORECASE) + maintenance_mode_output = [line for line in out.splitlines() if maint_mode_re.search(line)] + return bool(maintenance_mode_output) + + +def pacemaker_runner(module, **kwargs): + runner_command = ['pcs'] + runner = CmdRunner( + module, + command=runner_command, + arg_formats=dict( + cli_action=cmd_runner_fmt.as_list(), + state=cmd_runner_fmt.as_map(_state_map), + name=cmd_runner_fmt.as_list(), + 
resource_type=cmd_runner_fmt.as_func(fmt_resource_type), + resource_option=cmd_runner_fmt.as_list(), + resource_operation=cmd_runner_fmt.as_func(fmt_resource_operation), + resource_meta=cmd_runner_fmt.stack(cmd_runner_fmt.as_opt_val)("meta"), + resource_argument=cmd_runner_fmt.as_func(fmt_resource_argument), + resource_clone_ids=cmd_runner_fmt.as_list(), + resource_clone_meta=cmd_runner_fmt.as_list(), + apply_all=cmd_runner_fmt.as_bool("--all"), + agent_validation=cmd_runner_fmt.as_bool("--agent-validation"), + wait=cmd_runner_fmt.as_opt_eq_val("--wait"), + config=cmd_runner_fmt.as_fixed("config"), + force=cmd_runner_fmt.as_bool("--force"), + version=cmd_runner_fmt.as_fixed("--version"), + output_format=cmd_runner_fmt.as_opt_eq_val("--output-format"), + ), + **kwargs + ) + return runner diff --git a/plugins/module_utils/pipx.py b/plugins/module_utils/pipx.py index 35804c329a..3d81a6c5f2 100644 --- a/plugins/module_utils/pipx.py +++ b/plugins/module_utils/pipx.py @@ -1,49 +1,119 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2022, Alexei Znamensky # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt as fmt + +import json + + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +pipx_common_argspec = { + "global": dict(type='bool', default=False), + "executable": dict(type='path'), +} _state_map = dict( install='install', + install_all='install-all', present='install', uninstall='uninstall', absent='uninstall', uninstall_all='uninstall-all', inject='inject', + uninject='uninject', upgrade='upgrade', + upgrade_shared='upgrade-shared', upgrade_all='upgrade-all', 
reinstall='reinstall', reinstall_all='reinstall-all', + pin='pin', + unpin='unpin', ) def pipx_runner(module, command, **kwargs): + arg_formats = dict( + state=cmd_runner_fmt.as_map(_state_map), + name=cmd_runner_fmt.as_list(), + name_source=cmd_runner_fmt.as_func(cmd_runner_fmt.unpack_args(lambda n, s: [s] if s else [n])), + install_apps=cmd_runner_fmt.as_bool("--include-apps"), + install_deps=cmd_runner_fmt.as_bool("--include-deps"), + inject_packages=cmd_runner_fmt.as_list(), + force=cmd_runner_fmt.as_bool("--force"), + include_injected=cmd_runner_fmt.as_bool("--include-injected"), + index_url=cmd_runner_fmt.as_opt_val('--index-url'), + python=cmd_runner_fmt.as_opt_val('--python'), + system_site_packages=cmd_runner_fmt.as_bool("--system-site-packages"), + _list=cmd_runner_fmt.as_fixed(['list', '--include-injected', '--json']), + editable=cmd_runner_fmt.as_bool("--editable"), + pip_args=cmd_runner_fmt.as_opt_eq_val('--pip-args'), + suffix=cmd_runner_fmt.as_opt_val('--suffix'), + spec_metadata=cmd_runner_fmt.as_list(), + version=cmd_runner_fmt.as_fixed('--version'), + ) + arg_formats["global"] = cmd_runner_fmt.as_bool("--global") + runner = CmdRunner( module, command=command, - arg_formats=dict( - - state=fmt.as_map(_state_map), - name=fmt.as_list(), - name_source=fmt.as_func(fmt.unpack_args(lambda n, s: [s] if s else [n])), - install_deps=fmt.as_bool("--include-deps"), - inject_packages=fmt.as_list(), - force=fmt.as_bool("--force"), - include_injected=fmt.as_bool("--include-injected"), - index_url=fmt.as_opt_val('--index-url'), - python=fmt.as_opt_val('--python'), - _list=fmt.as_fixed(['list', '--include-injected', '--json']), - editable=fmt.as_bool("--editable"), - pip_args=fmt.as_opt_val('--pip-args'), - ), - environ_update={'USE_EMOJI': '0'}, + arg_formats=arg_formats, + environ_update={'USE_EMOJI': '0', 'PIPX_USE_EMOJI': '0'}, check_rc=True, **kwargs ) return runner + + +def _make_entry(venv_name, venv, include_injected, include_deps): + entry = { + 'name': 
venv_name, + 'version': venv['metadata']['main_package']['package_version'], + 'pinned': venv['metadata']['main_package'].get('pinned'), + } + if include_injected: + entry['injected'] = {k: v['package_version'] for k, v in venv['metadata']['injected_packages'].items()} + if include_deps: + entry['dependencies'] = list(venv['metadata']['main_package']['app_paths_of_dependencies']) + return entry + + +def make_process_dict(include_injected, include_deps=False): + def process_dict(rc, out, err): + if not out: + return {} + + results = {} + raw_data = json.loads(out) + for venv_name, venv in raw_data['venvs'].items(): + results[venv_name] = _make_entry(venv_name, venv, include_injected, include_deps) + + return results, raw_data + + return process_dict + + +def make_process_list(mod_helper, **kwargs): + # + # ATTENTION! + # + # The function `make_process_list()` is deprecated and will be removed in community.general 13.0.0 + # + process_dict = make_process_dict(mod_helper, **kwargs) + + def process_list(rc, out, err): + res_dict, raw_data = process_dict(rc, out, err) + + if kwargs.get("include_raw"): + mod_helper.vars.raw_output = raw_data + + return [ + entry + for name, entry in res_dict.items() + if name == kwargs.get("name") + ] + return process_list diff --git a/plugins/module_utils/pkg_req.py b/plugins/module_utils/pkg_req.py new file mode 100644 index 0000000000..13c824440f --- /dev/null +++ b/plugins/module_utils/pkg_req.py @@ -0,0 +1,71 @@ +# Copyright (c) 2025, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +from ansible_collections.community.general.plugins.module_utils import deps + + +with deps.declare("packaging"): + from packaging.requirements import Requirement + from packaging.version import parse as parse_version, InvalidVersion + + +class PackageRequirement: + def __init__(self, 
module, name): + self.module = module + self.parsed_name, self.requirement = self._parse_spec(name) + + def _parse_spec(self, name): + """ + Parse a package name that may include version specifiers using PEP 508. + Returns a tuple of (name, requirement) where requirement is of type packaging.requirements.Requirement and it may be None. + + Example inputs: + "package" + "package>=1.0" + "package>=1.0,<2.0" + "package[extra]>=1.0" + "package[foo,bar]>=1.0,!=1.5" + + :param name: Package name with optional version specifiers and extras + :return: Tuple of (name, requirement) + :raises ValueError: If the package specification is invalid + """ + if not name: + return name, None + + # Quick check for simple package names + if not any(c in name for c in '>= -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -import atexit -import time -import re -import traceback - -PROXMOXER_IMP_ERR = None -try: - from proxmoxer import ProxmoxAPI - HAS_PROXMOXER = True -except ImportError: - HAS_PROXMOXER = False - PROXMOXER_IMP_ERR = traceback.format_exc() - - -from ansible.module_utils.basic import env_fallback, missing_required_lib -from ansible.module_utils.common.text.converters import to_native -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - - -def proxmox_auth_argument_spec(): - return dict( - api_host=dict(type='str', - required=True, - fallback=(env_fallback, ['PROXMOX_HOST']) - ), - api_user=dict(type='str', - required=True, - fallback=(env_fallback, ['PROXMOX_USER']) - ), - api_password=dict(type='str', - no_log=True, - fallback=(env_fallback, ['PROXMOX_PASSWORD']) - ), - api_token_id=dict(type='str', - no_log=False - ), - api_token_secret=dict(type='str', - no_log=True - ), - validate_certs=dict(type='bool', - default=False - ), - 
) - - -def proxmox_to_ansible_bool(value): - '''Convert Proxmox representation of a boolean to be ansible-friendly''' - return True if value == 1 else False - - -def ansible_to_proxmox_bool(value): - '''Convert Ansible representation of a boolean to be proxmox-friendly''' - if value is None: - return None - - if not isinstance(value, bool): - raise ValueError("%s must be of type bool not %s" % (value, type(value))) - - return 1 if value else 0 - - -class ProxmoxAnsible(object): - """Base class for Proxmox modules""" - def __init__(self, module): - if not HAS_PROXMOXER: - module.fail_json(msg=missing_required_lib('proxmoxer'), exception=PROXMOXER_IMP_ERR) - - self.module = module - self.proxmox_api = self._connect() - # Test token validity - try: - self.proxmox_api.version.get() - except Exception as e: - module.fail_json(msg='%s' % e, exception=traceback.format_exc()) - - def _connect(self): - api_host = self.module.params['api_host'] - api_user = self.module.params['api_user'] - api_password = self.module.params['api_password'] - api_token_id = self.module.params['api_token_id'] - api_token_secret = self.module.params['api_token_secret'] - validate_certs = self.module.params['validate_certs'] - - auth_args = {'user': api_user} - if api_password: - auth_args['password'] = api_password - else: - auth_args['token_name'] = api_token_id - auth_args['token_value'] = api_token_secret - - try: - return ProxmoxAPI(api_host, verify_ssl=validate_certs, **auth_args) - except Exception as e: - self.module.fail_json(msg='%s' % e, exception=traceback.format_exc()) - - def version(self): - apireturn = self.proxmox_api.version.get() - return LooseVersion(apireturn['version']) - - def get_node(self, node): - nodes = [n for n in self.proxmox_api.nodes.get() if n['node'] == node] - return nodes[0] if nodes else None - - def get_nextvmid(self): - vmid = self.proxmox_api.cluster.nextid.get() - return vmid - - def get_vmid(self, name, ignore_missing=False, 
choose_first_if_multiple=False): - vms = [vm['vmid'] for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm.get('name') == name] - - if not vms: - if ignore_missing: - return None - - self.module.fail_json(msg='No VM with name %s found' % name) - elif len(vms) > 1: - self.module.fail_json(msg='Multiple VMs with name %s found, provide vmid instead' % name) - - return vms[0] - - def get_vm(self, vmid, ignore_missing=False): - vms = [vm for vm in self.proxmox_api.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid)] - - if vms: - return vms[0] - else: - if ignore_missing: - return None - - self.module.fail_json(msg='VM with vmid %s does not exist in cluster' % vmid) - - def api_task_ok(self, node, taskid): - status = self.proxmox_api.nodes(node).tasks(taskid).status.get() - return status['status'] == 'stopped' and status['exitstatus'] == 'OK' diff --git a/plugins/module_utils/puppet.py b/plugins/module_utils/puppet.py index 06369882fb..3b093d8c9d 100644 --- a/plugins/module_utils/puppet.py +++ b/plugins/module_utils/puppet.py @@ -1,10 +1,8 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2022, Alexei Znamensky # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import os @@ -63,11 +61,7 @@ def puppet_runner(module): return cmd def noop_func(v): - _noop = cmd_runner_fmt.as_map({ - True: "--noop", - False: "--no-noop", - }) - return _noop(module.check_mode or v) + return ["--noop"] if module.check_mode or v else ["--no-noop"] _logdest_map = { "syslog": ["--logdest", "syslog"], @@ -96,19 +90,19 @@ def puppet_runner(module): confdir=cmd_runner_fmt.as_opt_val("--confdir"), environment=cmd_runner_fmt.as_opt_val("--environment"), tags=cmd_runner_fmt.as_func(lambda v: ["--tags", ",".join(v)]), + skip_tags=cmd_runner_fmt.as_func(lambda 
v: ["--skip_tags", ",".join(v)]), certname=cmd_runner_fmt.as_opt_eq_val("--certname"), noop=cmd_runner_fmt.as_func(noop_func), - use_srv_records=cmd_runner_fmt.as_map({ - True: "--usr_srv_records", - False: "--no-usr_srv_records", - }), + use_srv_records=cmd_runner_fmt.as_bool("--usr_srv_records", "--no-usr_srv_records", ignore_none=True), logdest=cmd_runner_fmt.as_map(_logdest_map, default=[]), modulepath=cmd_runner_fmt.as_opt_eq_val("--modulepath"), _execute=cmd_runner_fmt.as_func(execute_func), summarize=cmd_runner_fmt.as_bool("--summarize"), + waitforlock=cmd_runner_fmt.as_opt_val("--waitforlock"), debug=cmd_runner_fmt.as_bool("--debug"), verbose=cmd_runner_fmt.as_bool("--verbose"), ), check_rc=False, + force_lang=module.params["environment_lang"], ) return runner diff --git a/plugins/module_utils/pure.py b/plugins/module_utils/pure.py deleted file mode 100644 index c9914c38b5..0000000000 --- a/plugins/module_utils/pure.py +++ /dev/null @@ -1,113 +0,0 @@ -# -*- coding: utf-8 -*- - -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by Ansible -# still belong to the author of the module, and may assign their own license -# to the complete work. 
-# -# Copyright (c), Simon Dodsley ,2017 -# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) -# SPDX-License-Identifier: BSD-2-Clause - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -HAS_PURESTORAGE = True -try: - from purestorage import purestorage -except ImportError: - HAS_PURESTORAGE = False - -HAS_PURITY_FB = True -try: - from purity_fb import PurityFb, FileSystem, FileSystemSnapshot, SnapshotSuffix, rest -except ImportError: - HAS_PURITY_FB = False - -from functools import wraps -from os import environ -from os import path -import platform - -VERSION = 1.2 -USER_AGENT_BASE = 'Ansible' -API_AGENT_VERSION = 1.5 - - -def get_system(module): - """Return System Object or Fail""" - user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % { - 'base': USER_AGENT_BASE, - 'class': __name__, - 'version': VERSION, - 'platform': platform.platform() - } - array_name = module.params['fa_url'] - api = module.params['api_token'] - - if array_name and api: - system = purestorage.FlashArray(array_name, api_token=api, user_agent=user_agent) - elif environ.get('PUREFA_URL') and environ.get('PUREFA_API'): - system = purestorage.FlashArray(environ.get('PUREFA_URL'), api_token=(environ.get('PUREFA_API')), user_agent=user_agent) - else: - module.fail_json(msg="You must set PUREFA_URL and PUREFA_API environment variables or the fa_url and api_token module arguments") - try: - system.get() - except Exception: - module.fail_json(msg="Pure Storage FlashArray authentication failed. 
Check your credentials") - return system - - -def get_blade(module): - """Return System Object or Fail""" - user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % { - 'base': USER_AGENT_BASE, - 'class': __name__, - 'version': VERSION, - 'platform': platform.platform() - } - blade_name = module.params['fb_url'] - api = module.params['api_token'] - - if blade_name and api: - blade = PurityFb(blade_name) - blade.disable_verify_ssl() - try: - blade.login(api) - versions = blade.api_version.list_versions().versions - if API_AGENT_VERSION in versions: - blade._api_client.user_agent = user_agent - except rest.ApiException as e: - module.fail_json(msg="Pure Storage FlashBlade authentication failed. Check your credentials") - elif environ.get('PUREFB_URL') and environ.get('PUREFB_API'): - blade = PurityFb(environ.get('PUREFB_URL')) - blade.disable_verify_ssl() - try: - blade.login(environ.get('PUREFB_API')) - versions = blade.api_version.list_versions().versions - if API_AGENT_VERSION in versions: - blade._api_client.user_agent = user_agent - except rest.ApiException as e: - module.fail_json(msg="Pure Storage FlashBlade authentication failed. 
Check your credentials") - else: - module.fail_json(msg="You must set PUREFB_URL and PUREFB_API environment variables or the fb_url and api_token module arguments") - return blade - - -def purefa_argument_spec(): - """Return standard base dictionary used for the argument_spec argument in AnsibleModule""" - - return dict( - fa_url=dict(), - api_token=dict(no_log=True), - ) - - -def purefb_argument_spec(): - """Return standard base dictionary used for the argument_spec argument in AnsibleModule""" - - return dict( - fb_url=dict(), - api_token=dict(no_log=True), - ) diff --git a/plugins/module_utils/python_runner.py b/plugins/module_utils/python_runner.py new file mode 100644 index 0000000000..7d9b94f50e --- /dev/null +++ b/plugins/module_utils/python_runner.py @@ -0,0 +1,34 @@ +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +import os + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, _ensure_list + + +class PythonRunner(CmdRunner): + def __init__(self, module, command, arg_formats=None, default_args_order=(), + check_rc=False, force_lang="C", path_prefix=None, environ_update=None, + python="python", venv=None): + self.python = python + self.venv = venv + self.has_venv = venv is not None + + if os.path.isabs(python) or '/' in python: + self.python = python + elif self.has_venv: + if path_prefix is None: + path_prefix = [] + path_prefix.append(os.path.join(venv, "bin")) + if environ_update is None: + environ_update = {} + environ_update["PATH"] = f"{':'.join(path_prefix)}:{os.environ['PATH']}" + environ_update["VIRTUAL_ENV"] = venv + + python_cmd = [self.python] + _ensure_list(command) + + super(PythonRunner, self).__init__(module, python_cmd, arg_formats, default_args_order, + check_rc, force_lang, path_prefix, environ_update) diff 
--git a/plugins/module_utils/rax.py b/plugins/module_utils/rax.py deleted file mode 100644 index 6331c0d1be..0000000000 --- a/plugins/module_utils/rax.py +++ /dev/null @@ -1,334 +0,0 @@ -# -*- coding: utf-8 -*- -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by -# Ansible still belong to the author of the module, and may assign their own -# license to the complete work. -# -# Copyright (c), Michael DeHaan , 2012-2013 -# -# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) -# SPDX-License-Identifier: BSD-2-Clause - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -import os -import re -from uuid import UUID - -from ansible.module_utils.six import text_type, binary_type - -FINAL_STATUSES = ('ACTIVE', 'ERROR') -VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use', - 'error', 'error_deleting') - -CLB_ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN', - 'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN'] -CLB_PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS', - 'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP', - 'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP'] - -NON_CALLABLES = (text_type, binary_type, bool, dict, int, list, type(None)) -PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000" -SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111" - - -def rax_slugify(value): - """Prepend a key with rax_ and normalize the key name""" - return 'rax_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_')) - - -def rax_clb_node_to_dict(obj): - """Function to convert a CLB Node object to a dict""" - if not obj: - return {} - node = obj.to_dict() - node['id'] = obj.id - node['weight'] = obj.weight - return node - - -def rax_to_dict(obj, 
obj_type='standard'): - """Generic function to convert a pyrax object to a dict - - obj_type values: - standard - clb - server - - """ - instance = {} - for key in dir(obj): - value = getattr(obj, key) - if obj_type == 'clb' and key == 'nodes': - instance[key] = [] - for node in value: - instance[key].append(rax_clb_node_to_dict(node)) - elif (isinstance(value, list) and len(value) > 0 and - not isinstance(value[0], NON_CALLABLES)): - instance[key] = [] - for item in value: - instance[key].append(rax_to_dict(item)) - elif (isinstance(value, NON_CALLABLES) and not key.startswith('_')): - if obj_type == 'server': - if key == 'image': - if not value: - instance['rax_boot_source'] = 'volume' - else: - instance['rax_boot_source'] = 'local' - key = rax_slugify(key) - instance[key] = value - - if obj_type == 'server': - for attr in ['id', 'accessIPv4', 'name', 'status']: - instance[attr] = instance.get(rax_slugify(attr)) - - return instance - - -def rax_find_bootable_volume(module, rax_module, server, exit=True): - """Find a servers bootable volume""" - cs = rax_module.cloudservers - cbs = rax_module.cloud_blockstorage - server_id = rax_module.utils.get_id(server) - volumes = cs.volumes.get_server_volumes(server_id) - bootable_volumes = [] - for volume in volumes: - vol = cbs.get(volume) - if module.boolean(vol.bootable): - bootable_volumes.append(vol) - if not bootable_volumes: - if exit: - module.fail_json(msg='No bootable volumes could be found for ' - 'server %s' % server_id) - else: - return False - elif len(bootable_volumes) > 1: - if exit: - module.fail_json(msg='Multiple bootable volumes found for server ' - '%s' % server_id) - else: - return False - - return bootable_volumes[0] - - -def rax_find_image(module, rax_module, image, exit=True): - """Find a server image by ID or Name""" - cs = rax_module.cloudservers - try: - UUID(image) - except ValueError: - try: - image = cs.images.find(human_id=image) - except (cs.exceptions.NotFound, cs.exceptions.NoUniqueMatch): 
- try: - image = cs.images.find(name=image) - except (cs.exceptions.NotFound, - cs.exceptions.NoUniqueMatch): - if exit: - module.fail_json(msg='No matching image found (%s)' % - image) - else: - return False - - return rax_module.utils.get_id(image) - - -def rax_find_volume(module, rax_module, name): - """Find a Block storage volume by ID or name""" - cbs = rax_module.cloud_blockstorage - try: - UUID(name) - volume = cbs.get(name) - except ValueError: - try: - volume = cbs.find(name=name) - except rax_module.exc.NotFound: - volume = None - except Exception as e: - module.fail_json(msg='%s' % e) - return volume - - -def rax_find_network(module, rax_module, network): - """Find a cloud network by ID or name""" - cnw = rax_module.cloud_networks - try: - UUID(network) - except ValueError: - if network.lower() == 'public': - return cnw.get_server_networks(PUBLIC_NET_ID) - elif network.lower() == 'private': - return cnw.get_server_networks(SERVICE_NET_ID) - else: - try: - network_obj = cnw.find_network_by_label(network) - except (rax_module.exceptions.NetworkNotFound, - rax_module.exceptions.NetworkLabelNotUnique): - module.fail_json(msg='No matching network found (%s)' % - network) - else: - return cnw.get_server_networks(network_obj) - else: - return cnw.get_server_networks(network) - - -def rax_find_server(module, rax_module, server): - """Find a Cloud Server by ID or name""" - cs = rax_module.cloudservers - try: - UUID(server) - server = cs.servers.get(server) - except ValueError: - servers = cs.servers.list(search_opts=dict(name='^%s$' % server)) - if not servers: - module.fail_json(msg='No Server was matched by name, ' - 'try using the Server ID instead') - if len(servers) > 1: - module.fail_json(msg='Multiple servers matched by name, ' - 'try using the Server ID instead') - - # We made it this far, grab the first and hopefully only server - # in the list - server = servers[0] - return server - - -def rax_find_loadbalancer(module, rax_module, loadbalancer): - 
"""Find a Cloud Load Balancer by ID or name""" - clb = rax_module.cloud_loadbalancers - try: - found = clb.get(loadbalancer) - except Exception: - found = [] - for lb in clb.list(): - if loadbalancer == lb.name: - found.append(lb) - - if not found: - module.fail_json(msg='No loadbalancer was matched') - - if len(found) > 1: - module.fail_json(msg='Multiple loadbalancers matched') - - # We made it this far, grab the first and hopefully only item - # in the list - found = found[0] - - return found - - -def rax_argument_spec(): - """Return standard base dictionary used for the argument_spec - argument in AnsibleModule - - """ - return dict( - api_key=dict(type='str', aliases=['password'], no_log=True), - auth_endpoint=dict(type='str'), - credentials=dict(type='path', aliases=['creds_file']), - env=dict(type='str'), - identity_type=dict(type='str', default='rackspace'), - region=dict(type='str'), - tenant_id=dict(type='str'), - tenant_name=dict(type='str'), - username=dict(type='str'), - validate_certs=dict(type='bool', aliases=['verify_ssl']), - ) - - -def rax_required_together(): - """Return the default list used for the required_together argument to - AnsibleModule""" - return [['api_key', 'username']] - - -def setup_rax_module(module, rax_module, region_required=True): - """Set up pyrax in a standard way for all modules""" - rax_module.USER_AGENT = 'ansible/%s %s' % (module.ansible_version, - rax_module.USER_AGENT) - - api_key = module.params.get('api_key') - auth_endpoint = module.params.get('auth_endpoint') - credentials = module.params.get('credentials') - env = module.params.get('env') - identity_type = module.params.get('identity_type') - region = module.params.get('region') - tenant_id = module.params.get('tenant_id') - tenant_name = module.params.get('tenant_name') - username = module.params.get('username') - verify_ssl = module.params.get('validate_certs') - - if env is not None: - rax_module.set_environment(env) - - rax_module.set_setting('identity_type', 
identity_type) - if verify_ssl is not None: - rax_module.set_setting('verify_ssl', verify_ssl) - if auth_endpoint is not None: - rax_module.set_setting('auth_endpoint', auth_endpoint) - if tenant_id is not None: - rax_module.set_setting('tenant_id', tenant_id) - if tenant_name is not None: - rax_module.set_setting('tenant_name', tenant_name) - - try: - username = username or os.environ.get('RAX_USERNAME') - if not username: - username = rax_module.get_setting('keyring_username') - if username: - api_key = 'USE_KEYRING' - if not api_key: - api_key = os.environ.get('RAX_API_KEY') - credentials = (credentials or os.environ.get('RAX_CREDENTIALS') or - os.environ.get('RAX_CREDS_FILE')) - region = (region or os.environ.get('RAX_REGION') or - rax_module.get_setting('region')) - except KeyError as e: - module.fail_json(msg='Unable to load %s' % e.message) - - try: - if api_key and username: - if api_key == 'USE_KEYRING': - rax_module.keyring_auth(username, region=region) - else: - rax_module.set_credentials(username, api_key=api_key, - region=region) - elif credentials: - credentials = os.path.expanduser(credentials) - rax_module.set_credential_file(credentials, region=region) - else: - raise Exception('No credentials supplied!') - except Exception as e: - if e.message: - msg = str(e.message) - else: - msg = repr(e) - module.fail_json(msg=msg) - - if region_required and region not in rax_module.regions: - module.fail_json(msg='%s is not a valid region, must be one of: %s' % - (region, ','.join(rax_module.regions))) - - return rax_module - - -def rax_scaling_group_personality_file(module, files): - if not files: - return [] - - results = [] - for rpath, lpath in files.items(): - lpath = os.path.expanduser(lpath) - try: - with open(lpath, 'r') as f: - results.append({ - 'path': rpath, - 'contents': f.read(), - }) - except Exception as e: - module.fail_json(msg='Failed to load %s: %s' % (lpath, str(e))) - return results diff --git a/plugins/module_utils/redfish_utils.py 
b/plugins/module_utils/redfish_utils.py index eadca28205..ab551b44c5 100644 --- a/plugins/module_utils/redfish_utils.py +++ b/plugins/module_utils/redfish_utils.py @@ -1,18 +1,21 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2017-2018 Dell EMC Inc. # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations +import http.client as http_client import json +import os +import random +import string +import time from ansible.module_utils.urls import open_url from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.common.text.converters import to_text -from ansible.module_utils.six.moves import http_client -from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError -from ansible.module_utils.six.moves.urllib.parse import urlparse +from ansible.module_utils.common.text.converters import to_bytes +from urllib.error import URLError, HTTPError +from urllib.parse import urlparse GET_HEADERS = {'accept': 'application/json', 'OData-Version': '4.0'} POST_HEADERS = {'content-type': 'application/json', 'accept': 'application/json', @@ -28,11 +31,26 @@ FAIL_MSG = 'Issuing a data modification command without specifying the '\ 'than one %(resource)s is no longer allowed. Use the `resource_id` '\ 'option to specify the target %(resource)s ID.' 
+# Use together with the community.general.redfish docs fragment +REDFISH_COMMON_ARGUMENT_SPEC = { + "validate_certs": { + "type": "bool", + "default": False, + }, + "ca_path": { + "type": "path", + }, + "ciphers": { + "type": "list", + "elements": "str", + }, +} + class RedfishUtils(object): def __init__(self, creds, root_uri, timeout, module, resource_id=None, - data_modification=False, strip_etag_quotes=False): + data_modification=False, strip_etag_quotes=False, ciphers=None): self.root_uri = root_uri self.creds = creds self.timeout = timeout @@ -43,8 +61,10 @@ class RedfishUtils(object): self.resource_id = resource_id self.data_modification = data_modification self.strip_etag_quotes = strip_etag_quotes + self.ciphers = ciphers if ciphers is not None else module.params.get("ciphers") self._vendor = None - self._init_session() + self.validate_certs = module.params.get("validate_certs", False) + self.ca_path = module.params.get("ca_path") def _auth_params(self, headers): """ @@ -109,7 +129,7 @@ class RedfishUtils(object): # Note: This is also a fallthrough for properties that are # arrays of objects. Some services erroneously omit properties - # within arrays of objects when not configured, and it's + # within arrays of objects when not configured, and it is # expecting the client to provide them anyway. 
if req_pyld[prop] != cur_pyld[prop]: @@ -122,28 +142,52 @@ class RedfishUtils(object): resp['msg'] = 'Properties in %s are already set' % uri return resp + def _request(self, uri, **kwargs): + kwargs.setdefault("validate_certs", self.validate_certs) + kwargs.setdefault("follow_redirects", "all") + kwargs.setdefault("use_proxy", True) + kwargs.setdefault("timeout", self.timeout) + kwargs.setdefault("ciphers", self.ciphers) + kwargs.setdefault("ca_path", self.ca_path) + resp = open_url(uri, **kwargs) + headers = {k.lower(): v for (k, v) in resp.info().items()} + return resp, headers + # The following functions are to send GET/POST/PATCH/DELETE requests - def get_request(self, uri): + def get_request(self, uri, override_headers=None, allow_no_resp=False, timeout=None): req_headers = dict(GET_HEADERS) + if override_headers: + req_headers.update(override_headers) username, password, basic_auth = self._auth_params(req_headers) + if timeout is None: + timeout = self.timeout try: # Service root is an unauthenticated resource; remove credentials # in case the caller will be using sessions later. 
if uri == (self.root_uri + self.service_root): basic_auth = False - resp = open_url(uri, method="GET", headers=req_headers, - url_username=username, url_password=password, - force_basic_auth=basic_auth, validate_certs=False, - follow_redirects='all', - use_proxy=True, timeout=self.timeout) - data = json.loads(to_native(resp.read())) - headers = dict((k.lower(), v) for (k, v) in resp.info().items()) + resp, headers = self._request( + uri, + method="GET", + headers=req_headers, + url_username=username, + url_password=password, + force_basic_auth=basic_auth, + timeout=timeout, + ) + try: + data = json.loads(to_native(resp.read())) + except Exception as e: + # No response data; this is okay in certain cases + data = None + if not allow_no_resp: + raise except HTTPError as e: - msg = self._get_extended_message(e) + msg, data = self._get_extended_message(e) return {'ret': False, 'msg': "HTTP Error %s on GET request to '%s', extended message: '%s'" % (e.code, uri, msg), - 'status': e.code} + 'status': e.code, 'data': data} except URLError as e: return {'ret': False, 'msg': "URL Error on GET request to '%s': '%s'" % (uri, e.reason)} @@ -153,7 +197,7 @@ class RedfishUtils(object): 'msg': "Failed GET request to '%s': '%s'" % (uri, to_text(e))} return {'ret': True, 'data': data, 'headers': headers, 'resp': resp} - def post_request(self, uri, pyld): + def post_request(self, uri, pyld, multipart=False): req_headers = dict(POST_HEADERS) username, password, basic_auth = self._auth_params(req_headers) try: @@ -162,24 +206,33 @@ class RedfishUtils(object): # header since this can cause conflicts with some services if self.sessions_uri is not None and uri == (self.root_uri + self.sessions_uri): basic_auth = False - resp = open_url(uri, data=json.dumps(pyld), - headers=req_headers, method="POST", - url_username=username, url_password=password, - force_basic_auth=basic_auth, validate_certs=False, - follow_redirects='all', - use_proxy=True, timeout=self.timeout) + if multipart: + # 
Multipart requests require special handling to encode the request body + multipart_encoder = self._prepare_multipart(pyld) + data = multipart_encoder[0] + req_headers['content-type'] = multipart_encoder[1] + else: + data = json.dumps(pyld) + resp, headers = self._request( + uri, + data=data, + headers=req_headers, + method="POST", + url_username=username, + url_password=password, + force_basic_auth=basic_auth, + ) try: data = json.loads(to_native(resp.read())) except Exception as e: # No response data; this is okay in many cases data = None - headers = dict((k.lower(), v) for (k, v) in resp.info().items()) except HTTPError as e: - msg = self._get_extended_message(e) + msg, data = self._get_extended_message(e) return {'ret': False, 'msg': "HTTP Error %s on POST request to '%s', extended message: '%s'" % (e.code, uri, msg), - 'status': e.code} + 'status': e.code, 'data': data} except URLError as e: return {'ret': False, 'msg': "URL Error on POST request to '%s': '%s'" % (uri, e.reason)} @@ -216,18 +269,21 @@ class RedfishUtils(object): username, password, basic_auth = self._auth_params(req_headers) try: - resp = open_url(uri, data=json.dumps(pyld), - headers=req_headers, method="PATCH", - url_username=username, url_password=password, - force_basic_auth=basic_auth, validate_certs=False, - follow_redirects='all', - use_proxy=True, timeout=self.timeout) + resp, dummy = self._request( + uri, + data=json.dumps(pyld), + headers=req_headers, + method="PATCH", + url_username=username, + url_password=password, + force_basic_auth=basic_auth, + ) except HTTPError as e: - msg = self._get_extended_message(e) + msg, data = self._get_extended_message(e) return {'ret': False, 'changed': False, 'msg': "HTTP Error %s on PATCH request to '%s', extended message: '%s'" % (e.code, uri, msg), - 'status': e.code} + 'status': e.code, 'data': data} except URLError as e: return {'ret': False, 'changed': False, 'msg': "URL Error on PATCH request to '%s': '%s'" % (uri, e.reason)} @@ -251,18 
+307,21 @@ class RedfishUtils(object): req_headers['If-Match'] = etag username, password, basic_auth = self._auth_params(req_headers) try: - resp = open_url(uri, data=json.dumps(pyld), - headers=req_headers, method="PUT", - url_username=username, url_password=password, - force_basic_auth=basic_auth, validate_certs=False, - follow_redirects='all', - use_proxy=True, timeout=self.timeout) + resp, dummy = self._request( + uri, + data=json.dumps(pyld), + headers=req_headers, + method="PUT", + url_username=username, + url_password=password, + force_basic_auth=basic_auth, + ) except HTTPError as e: - msg = self._get_extended_message(e) + msg, data = self._get_extended_message(e) return {'ret': False, 'msg': "HTTP Error %s on PUT request to '%s', extended message: '%s'" % (e.code, uri, msg), - 'status': e.code} + 'status': e.code, 'data': data} except URLError as e: return {'ret': False, 'msg': "URL Error on PUT request to '%s': '%s'" % (uri, e.reason)} @@ -277,18 +336,21 @@ class RedfishUtils(object): username, password, basic_auth = self._auth_params(req_headers) try: data = json.dumps(pyld) if pyld else None - resp = open_url(uri, data=data, - headers=req_headers, method="DELETE", - url_username=username, url_password=password, - force_basic_auth=basic_auth, validate_certs=False, - follow_redirects='all', - use_proxy=True, timeout=self.timeout) + resp, dummy = self._request( + uri, + data=data, + headers=req_headers, + method="DELETE", + url_username=username, + url_password=password, + force_basic_auth=basic_auth, + ) except HTTPError as e: - msg = self._get_extended_message(e) + msg, data = self._get_extended_message(e) return {'ret': False, 'msg': "HTTP Error %s on DELETE request to '%s', extended message: '%s'" % (e.code, uri, msg), - 'status': e.code} + 'status': e.code, 'data': data} except URLError as e: return {'ret': False, 'msg': "URL Error on DELETE request to '%s': '%s'" % (uri, e.reason)} @@ -298,6 +360,59 @@ class RedfishUtils(object): 'msg': "Failed 
DELETE request to '%s': '%s'" % (uri, to_text(e))} return {'ret': True, 'resp': resp} + @staticmethod + def _prepare_multipart(fields): + """Prepares a multipart body based on a set of fields provided. + + Ideally it would have been good to use the existing 'prepare_multipart' + found in ansible.module_utils.urls, but it takes files and encodes them + as Base64 strings, which is not expected by Redfish services. It also + adds escaping of certain bytes in the payload, such as inserting '\r' + any time it finds a standalone '\n', which corrupts the image payload + send to the service. This implementation is simplified to Redfish's + usage and doesn't necessarily represent an exhaustive method of + building multipart requests. + """ + + def write_buffer(body, line): + # Adds to the multipart body based on the provided data type + # At this time there is only support for strings, dictionaries, and bytes (default) + if isinstance(line, str): + body.append(to_bytes(line, encoding='utf-8')) + elif isinstance(line, dict): + body.append(to_bytes(json.dumps(line), encoding='utf-8')) + else: + body.append(line) + return + + # Generate a random boundary marker; may need to consider probing the + # payload for potential conflicts in the future + boundary = ''.join(random.choice(string.digits + string.ascii_letters) for i in range(30)) + body = [] + for form in fields: + # Fill in the form details + write_buffer(body, '--' + boundary) + + # Insert the headers (Content-Disposition and Content-Type) + if 'filename' in fields[form]: + name = os.path.basename(fields[form]['filename']).replace('"', '\\"') + write_buffer(body, 'Content-Disposition: form-data; name="%s"; filename="%s"' % (to_text(form), to_text(name))) + else: + write_buffer(body, 'Content-Disposition: form-data; name="%s"' % form) + write_buffer(body, 'Content-Type: %s' % fields[form]['mime_type']) + write_buffer(body, '') + + # Insert the payload; read from the file if not given by the caller + if 'content' not in 
fields[form]: + with open(to_bytes(fields[form]['filename'], errors='surrogate_or_strict'), 'rb') as f: + fields[form]['content'] = f.read() + write_buffer(body, fields[form]['content']) + + # Finalize the entire request + write_buffer(body, '--' + boundary + '--') + write_buffer(body, '') + return (b'\r\n'.join(body), 'multipart/form-data; boundary=' + boundary) + @staticmethod def _get_extended_message(error): """ @@ -305,8 +420,10 @@ class RedfishUtils(object): :param error: an HTTPError exception :type error: HTTPError :return: the ExtendedInfo message if present, else standard HTTP error + :return: the JSON data of the response if present """ msg = http_client.responses.get(error.code, '') + data = None if error.code >= 400: try: body = error.read().decode('utf-8') @@ -320,10 +437,7 @@ class RedfishUtils(object): msg = str(data['error']['@Message.ExtendedInfo']) except Exception: pass - return msg - - def _init_session(self): - pass + return msg, data def _get_vendor(self): # If we got the vendor info once, don't get it again @@ -448,9 +562,9 @@ class RedfishUtils(object): data = response['data'] self.firmware_uri = self.software_uri = None if 'FirmwareInventory' in data: - self.firmware_uri = data['FirmwareInventory'][u'@odata.id'] + self.firmware_uri = data['FirmwareInventory']['@odata.id'] if 'SoftwareInventory' in data: - self.software_uri = data['SoftwareInventory'][u'@odata.id'] + self.software_uri = data['SoftwareInventory']['@odata.id'] return {'ret': True} def _find_chassis_resource(self): @@ -524,12 +638,13 @@ class RedfishUtils(object): data = response['data'] if 'Parameters' in data: params = data['Parameters'] - ai = dict((p['Name'], p) - for p in params if 'Name' in p) + ai = {p['Name']: p for p in params if 'Name' in p} if not ai: - ai = dict((k[:-24], - {'AllowableValues': v}) for k, v in action.items() - if k.endswith('@Redfish.AllowableValues')) + ai = { + k[:-24]: {'AllowableValues': v} + for k, v in action.items() + if 
k.endswith('@Redfish.AllowableValues') + } return ai def _get_allowable_values(self, action, name, default_values=None): @@ -542,6 +657,24 @@ class RedfishUtils(object): allowable_values = default_values return allowable_values + def check_service_availability(self): + """ + Checks if the service is accessible. + + :return: dict containing the status of the service + """ + + # Get the service root + # Override the timeout since the service root is expected to be readily + # available. + service_root = self.get_request(self.root_uri + self.service_root, timeout=10) + if service_root['ret'] is False: + # Failed, either due to a timeout or HTTP error; not available + return {'ret': True, 'available': False} + + # Successfully accessed the service root; available + return {'ret': True, 'available': True} + def get_logs(self): log_svcs_uri_list = [] list_of_logs = [] @@ -563,12 +696,12 @@ class RedfishUtils(object): return response data = response['data'] for log_svcs_entry in data.get('Members', []): - response = self.get_request(self.root_uri + log_svcs_entry[u'@odata.id']) + response = self.get_request(self.root_uri + log_svcs_entry['@odata.id']) if response['ret'] is False: return response _data = response['data'] if 'Entries' in _data: - log_svcs_uri_list.append(_data['Entries'][u'@odata.id']) + log_svcs_uri_list.append(_data['Entries']['@odata.id']) # For each entry in LogServices, get log name and all log entries for log_svcs_uri in log_svcs_uri_list: @@ -588,7 +721,7 @@ class RedfishUtils(object): entry[prop] = logEntry.get(prop) if entry: list_of_log_entries.append(entry) - log_name = log_svcs_uri.split('/')[-1] + log_name = log_svcs_uri.rstrip('/').split('/')[-1] logs[log_name] = list_of_log_entries list_of_logs.append(logs) @@ -611,15 +744,15 @@ class RedfishUtils(object): return response data = response['data'] - for log_svcs_entry in data[u'Members']: + for log_svcs_entry in data['Members']: response = self.get_request(self.root_uri + 
log_svcs_entry["@odata.id"]) if response['ret'] is False: return response _data = response['data'] # Check to make sure option is available, otherwise error is ugly if "Actions" in _data: - if "#LogService.ClearLog" in _data[u"Actions"]: - self.post_request(self.root_uri + _data[u"Actions"]["#LogService.ClearLog"]["target"], {}) + if "#LogService.ClearLog" in _data["Actions"]: + self.post_request(self.root_uri + _data["Actions"]["#LogService.ClearLog"]["target"], {}) if response['ret'] is False: return response return {'ret': True} @@ -652,7 +785,8 @@ class RedfishUtils(object): properties = ['CacheSummary', 'FirmwareVersion', 'Identifiers', 'Location', 'Manufacturer', 'Model', 'Name', 'Id', 'PartNumber', 'SerialNumber', 'SpeedGbps', 'Status'] - key = "StorageControllers" + key = "Controllers" + deprecated_key = "StorageControllers" # Find Storage service response = self.get_request(self.root_uri + systems_uri) @@ -673,14 +807,37 @@ class RedfishUtils(object): # Loop through Members and their StorageControllers # and gather properties from each StorageController - if data[u'Members']: - for storage_member in data[u'Members']: - storage_member_uri = storage_member[u'@odata.id'] + if data['Members']: + for storage_member in data['Members']: + storage_member_uri = storage_member['@odata.id'] response = self.get_request(self.root_uri + storage_member_uri) data = response['data'] if key in data: - controller_list = data[key] + controllers_uri = data[key]['@odata.id'] + + response = self.get_request(self.root_uri + controllers_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + if data['Members']: + for controller_member in data['Members']: + controller_member_uri = controller_member['@odata.id'] + response = self.get_request(self.root_uri + controller_member_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + + controller_result = {} + for property in properties: 
+ if property in data: + controller_result[property] = data[property] + controller_results.append(controller_result) + elif deprecated_key in data: + controller_list = data[deprecated_key] for controller in controller_list: controller_result = {} for property in properties: @@ -702,7 +859,7 @@ class RedfishUtils(object): properties = ['BlockSizeBytes', 'CapableSpeedGbs', 'CapacityBytes', 'EncryptionAbility', 'EncryptionStatus', 'FailurePredicted', 'HotspareType', 'Id', 'Identifiers', - 'Manufacturer', 'MediaType', 'Model', 'Name', + 'Links', 'Manufacturer', 'MediaType', 'Model', 'Name', 'PartNumber', 'PhysicalLocation', 'Protocol', 'Revision', 'RotationSpeedRPM', 'SerialNumber', 'Status'] @@ -718,16 +875,16 @@ class RedfishUtils(object): if 'Storage' in data: # Get a list of all storage controllers and build respective URIs - storage_uri = data[u'Storage'][u'@odata.id'] + storage_uri = data['Storage']['@odata.id'] response = self.get_request(self.root_uri + storage_uri) if response['ret'] is False: return response result['ret'] = True data = response['data'] - if data[u'Members']: - for controller in data[u'Members']: - controller_list.append(controller[u'@odata.id']) + if data['Members']: + for controller in data['Members']: + controller_list.append(controller['@odata.id']) for c in controller_list: uri = self.root_uri + c response = self.get_request(uri) @@ -735,7 +892,26 @@ class RedfishUtils(object): return response data = response['data'] controller_name = 'Controller 1' - if 'StorageControllers' in data: + storage_id = data['Id'] + if 'Controllers' in data: + controllers_uri = data['Controllers']['@odata.id'] + + response = self.get_request(self.root_uri + controllers_uri) + if response['ret'] is False: + return response + result['ret'] = True + cdata = response['data'] + + if cdata['Members']: + controller_member_uri = cdata['Members'][0]['@odata.id'] + + response = self.get_request(self.root_uri + controller_member_uri) + if response['ret'] is False: + 
return response + result['ret'] = True + cdata = response['data'] + controller_name = cdata['Name'] + elif 'StorageControllers' in data: sc = data['StorageControllers'] if sc: if 'Name' in sc[0]: @@ -745,18 +921,25 @@ class RedfishUtils(object): controller_name = 'Controller %s' % sc_id drive_results = [] if 'Drives' in data: - for device in data[u'Drives']: - disk_uri = self.root_uri + device[u'@odata.id'] + for device in data['Drives']: + disk_uri = self.root_uri + device['@odata.id'] response = self.get_request(disk_uri) data = response['data'] drive_result = {} + drive_result['RedfishURI'] = data['@odata.id'] for property in properties: if property in data: if data[property] is not None: - drive_result[property] = data[property] + if property == "Links": + if "Volumes" in data["Links"].keys(): + volumes = [v["@odata.id"] for v in data["Links"]["Volumes"]] + drive_result["Volumes"] = volumes + else: + drive_result[property] = data[property] drive_results.append(drive_result) drives = {'Controller': controller_name, + 'StorageId': storage_id, 'Drives': drive_results} result["entries"].append(drives) @@ -769,8 +952,8 @@ class RedfishUtils(object): result['ret'] = True data = response['data'] - for controller in data[u'Members']: - controller_list.append(controller[u'@odata.id']) + for controller in data['Members']: + controller_list.append(controller['@odata.id']) for c in controller_list: uri = self.root_uri + c @@ -784,7 +967,7 @@ class RedfishUtils(object): sc_id = data.get('Id', '1') controller_name = 'Controller %s' % sc_id drive_results = [] - for device in data[u'Devices']: + for device in data['Devices']: drive_result = {} for property in properties: if property in device: @@ -822,7 +1005,7 @@ class RedfishUtils(object): if 'Storage' in data: # Get a list of all storage controllers and build respective URIs - storage_uri = data[u'Storage'][u'@odata.id'] + storage_uri = data['Storage']['@odata.id'] response = self.get_request(self.root_uri + storage_uri) if 
response['ret'] is False: return response @@ -830,16 +1013,34 @@ class RedfishUtils(object): data = response['data'] if data.get('Members'): - for controller in data[u'Members']: - controller_list.append(controller[u'@odata.id']) - for c in controller_list: + for controller in data['Members']: + controller_list.append(controller['@odata.id']) + for idx, c in enumerate(controller_list): uri = self.root_uri + c response = self.get_request(uri) if response['ret'] is False: return response data = response['data'] - controller_name = 'Controller 1' - if 'StorageControllers' in data: + controller_name = 'Controller %s' % str(idx) + if 'Controllers' in data: + response = self.get_request(self.root_uri + data['Controllers']['@odata.id']) + if response['ret'] is False: + return response + c_data = response['data'] + + if c_data.get('Members') and c_data['Members']: + response = self.get_request(self.root_uri + c_data['Members'][0]['@odata.id']) + if response['ret'] is False: + return response + member_data = response['data'] + + if member_data: + if 'Name' in member_data: + controller_name = member_data['Name'] + else: + controller_id = member_data.get('Id', '1') + controller_name = 'Controller %s' % controller_id + elif 'StorageControllers' in data: sc = data['StorageControllers'] if sc: if 'Name' in sc[0]: @@ -848,15 +1049,16 @@ class RedfishUtils(object): sc_id = sc[0].get('Id', '1') controller_name = 'Controller %s' % sc_id volume_results = [] + volume_list = [] if 'Volumes' in data: # Get a list of all volumes and build respective URIs - volumes_uri = data[u'Volumes'][u'@odata.id'] + volumes_uri = data['Volumes']['@odata.id'] response = self.get_request(self.root_uri + volumes_uri) data = response['data'] if data.get('Members'): - for volume in data[u'Members']: - volume_list.append(volume[u'@odata.id']) + for volume in data['Members']: + volume_list.append(volume['@odata.id']) for v in volume_list: uri = self.root_uri + v response = self.get_request(uri) @@ -873,10 
+1075,10 @@ class RedfishUtils(object): # Get related Drives Id drive_id_list = [] if 'Links' in data: - if 'Drives' in data[u'Links']: - for link in data[u'Links'][u'Drives']: - drive_id_link = link[u'@odata.id'] - drive_id = drive_id_link.split("/")[-1] + if 'Drives' in data['Links']: + for link in data['Links']['Drives']: + drive_id_link = link['@odata.id'] + drive_id = drive_id_link.rstrip('/').split('/')[-1] drive_id_list.append({'Id': drive_id}) volume_result['Linked_drives'] = drive_id_list volume_results.append(volume_result) @@ -935,20 +1137,27 @@ class RedfishUtils(object): return self.manage_power(command, self.systems_uri, '#ComputerSystem.Reset') - def manage_manager_power(self, command): + def manage_manager_power(self, command, wait=False, wait_timeout=120): return self.manage_power(command, self.manager_uri, - '#Manager.Reset') + '#Manager.Reset', wait, wait_timeout) - def manage_power(self, command, resource_uri, action_name): + def manage_power(self, command, resource_uri, action_name, wait=False, + wait_timeout=120): key = "Actions" reset_type_values = ['On', 'ForceOff', 'GracefulShutdown', 'GracefulRestart', 'ForceRestart', 'Nmi', - 'ForceOn', 'PushPowerButton', 'PowerCycle'] + 'ForceOn', 'PushPowerButton', 'PowerCycle', + 'FullPowerCycle'] # command should be PowerOn, PowerForceOff, etc. 
if not command.startswith('Power'): return {'ret': False, 'msg': 'Invalid Command (%s)' % command} - reset_type = command[5:] + + # Commands (except PowerCycle) will be stripped of the 'Power' prefix + if command == 'PowerCycle': + reset_type = command + else: + reset_type = command[5:] # map Reboot to a ResetType that does a reboot if reset_type == 'Reboot': @@ -994,34 +1203,123 @@ class RedfishUtils(object): response = self.post_request(self.root_uri + action_uri, payload) if response['ret'] is False: return response + + # If requested to wait for the service to be available again, block + # until it is ready + if wait: + elapsed_time = 0 + start_time = time.time() + # Start with a large enough sleep. Some services will process new + # requests while in the middle of shutting down, thus breaking out + # early. + time.sleep(30) + + # Periodically check for the service's availability. + while elapsed_time <= wait_timeout: + status = self.check_service_availability() + if status['available']: + # It is available; we are done + break + time.sleep(5) + elapsed_time = time.time() - start_time + + if elapsed_time > wait_timeout: + # Exhausted the wait timer; error + return {'ret': False, 'changed': True, + 'msg': 'The service did not become available after %d seconds' % wait_timeout} return {'ret': True, 'changed': True} - def _find_account_uri(self, username=None, acct_id=None): - if not any((username, acct_id)): - return {'ret': False, 'msg': - 'Must provide either account_id or account_username'} + def manager_reset_to_defaults(self, command): + return self.reset_to_defaults(command, self.manager_uri, + '#Manager.ResetToDefaults') - response = self.get_request(self.root_uri + self.accounts_uri) + def reset_to_defaults(self, command, resource_uri, action_name): + key = "Actions" + reset_type_values = ['ResetAll', + 'PreserveNetworkAndUsers', + 'PreserveNetwork'] + + if command not in reset_type_values: + return {'ret': False, 'msg': 'Invalid Command (%s)' % command} + 
+ # read the resource and get the current power state + response = self.get_request(self.root_uri + resource_uri) if response['ret'] is False: return response data = response['data'] - uris = [a.get('@odata.id') for a in data.get('Members', []) if - a.get('@odata.id')] - for uri in uris: - response = self.get_request(self.root_uri + uri) + # get the reset Action and target URI + if key not in data or action_name not in data[key]: + return {'ret': False, 'msg': 'Action %s not found' % action_name} + reset_action = data[key][action_name] + if 'target' not in reset_action: + return {'ret': False, + 'msg': 'target URI missing from Action %s' % action_name} + action_uri = reset_action['target'] + + # get AllowableValues + ai = self._get_all_action_info_values(reset_action) + allowable_values = ai.get('ResetType', {}).get('AllowableValues', []) + + # map ResetType to an allowable value if needed + if allowable_values and command not in allowable_values: + return {'ret': False, + 'msg': 'Specified reset type (%s) not supported ' + 'by service. 
Supported types: %s' % + (command, allowable_values)} + + # define payload + payload = {'ResetType': command} + + # POST to Action URI + response = self.post_request(self.root_uri + action_uri, payload) + if response['ret'] is False: + return response + return {'ret': True, 'changed': True} + + def _find_account_uri(self, username=None, acct_id=None, password_change_uri=None): + if not any((username, acct_id)): + return {'ret': False, 'msg': + 'Must provide either account_id or account_username'} + + if password_change_uri: + # Password change required; go directly to the specified URI + response = self.get_request(self.root_uri + password_change_uri) if response['ret'] is False: - continue + return response data = response['data'] headers = response['headers'] if username: if username == data.get('UserName'): return {'ret': True, 'data': data, - 'headers': headers, 'uri': uri} + 'headers': headers, 'uri': password_change_uri} if acct_id: if acct_id == data.get('Id'): return {'ret': True, 'data': data, - 'headers': headers, 'uri': uri} + 'headers': headers, 'uri': password_change_uri} + else: + # Walk the accounts collection to find the desired user + response = self.get_request(self.root_uri + self.accounts_uri) + if response['ret'] is False: + return response + data = response['data'] + + uris = [a.get('@odata.id') for a in data.get('Members', []) if + a.get('@odata.id')] + for uri in uris: + response = self.get_request(self.root_uri + uri) + if response['ret'] is False: + continue + data = response['data'] + headers = response['headers'] + if username: + if username == data.get('UserName'): + return {'ret': True, 'data': data, + 'headers': headers, 'uri': uri} + if acct_id: + if acct_id == data.get('Id'): + return {'ret': True, 'data': data, + 'headers': headers, 'uri': uri} return {'ret': False, 'no_match': True, 'msg': 'No account with the given account_id or account_username found'} @@ -1056,7 +1354,8 @@ class RedfishUtils(object): user_list = [] 
users_results = [] # Get these entries, but does not fail if not found - properties = ['Id', 'Name', 'UserName', 'RoleId', 'Locked', 'Enabled'] + properties = ['Id', 'Name', 'UserName', 'RoleId', 'Locked', 'Enabled', + 'AccountTypes', 'OEMAccountTypes'] response = self.get_request(self.root_uri + self.accounts_uri) if response['ret'] is False: @@ -1065,7 +1364,7 @@ class RedfishUtils(object): data = response['data'] for users in data.get('Members', []): - user_list.append(users[u'@odata.id']) # user_list[] are URIs + user_list.append(users['@odata.id']) # user_list[] are URIs # for each user, get details for uri in user_list: @@ -1079,6 +1378,12 @@ class RedfishUtils(object): if property in data: user[property] = data[property] + # Filter out empty account slots + # An empty account slot can be detected if the username is an empty + # string and if the account is disabled + if user.get('UserName', '') == '' and not user.get('Enabled', False): + continue + users_results.append(user) result["entries"] = users_results return result @@ -1101,6 +1406,10 @@ class RedfishUtils(object): payload['Password'] = user.get('account_password') if user.get('account_roleid'): payload['RoleId'] = user.get('account_roleid') + if user.get('account_accounttypes'): + payload['AccountTypes'] = user.get('account_accounttypes') + if user.get('account_oemaccounttypes'): + payload['OEMAccountTypes'] = user.get('account_oemaccounttypes') return self.patch_request(self.root_uri + uri, payload, check_pyld=True) def add_user(self, user): @@ -1131,6 +1440,10 @@ class RedfishUtils(object): payload['Password'] = user.get('account_password') if user.get('account_roleid'): payload['RoleId'] = user.get('account_roleid') + if user.get('account_accounttypes'): + payload['AccountTypes'] = user.get('account_accounttypes') + if user.get('account_oemaccounttypes'): + payload['OEMAccountTypes'] = user.get('account_oemaccounttypes') if user.get('account_id'): payload['Id'] = user.get('account_id') @@ -1227,7 
+1540,8 @@ class RedfishUtils(object): 'Must provide account_password for UpdateUserPassword command'} response = self._find_account_uri(username=user.get('account_username'), - acct_id=user.get('account_id')) + acct_id=user.get('account_id'), + password_change_uri=user.get('account_passwordchangerequired')) if not response['ret']: return response @@ -1270,6 +1584,52 @@ class RedfishUtils(object): resp['msg'] = 'Modified account service' return resp + def update_user_accounttypes(self, user): + account_types = user.get('account_accounttypes') + oemaccount_types = user.get('account_oemaccounttypes') + if account_types is None and oemaccount_types is None: + return {'ret': False, 'msg': + 'Must provide account_accounttypes or account_oemaccounttypes for UpdateUserAccountTypes command'} + + response = self._find_account_uri(username=user.get('account_username'), + acct_id=user.get('account_id')) + if not response['ret']: + return response + + uri = response['uri'] + payload = {} + if user.get('account_accounttypes'): + payload['AccountTypes'] = user.get('account_accounttypes') + if user.get('account_oemaccounttypes'): + payload['OEMAccountTypes'] = user.get('account_oemaccounttypes') + + return self.patch_request(self.root_uri + uri, payload, check_pyld=True) + + def check_password_change_required(self, return_data): + """ + Checks a response if a user needs to change their password + + :param return_data: The return data for a failed request + :return: None or the URI of the account to update + """ + uri = None + if 'data' in return_data: + # Find the extended messages in the response payload + extended_messages = return_data['data'].get('error', {}).get('@Message.ExtendedInfo', []) + if len(extended_messages) == 0: + extended_messages = return_data['data'].get('@Message.ExtendedInfo', []) + # Go through each message and look for Base.1.X.PasswordChangeRequired + for message in extended_messages: + message_id = message.get('MessageId') + if message_id is None: + # 
While this is invalid, treat the lack of a MessageId as "no message" + continue + if message_id.startswith('Base.1.') and message_id.endswith('.PasswordChangeRequired'): + # Password change required; get the URI of the user account + uri = message['MessageArgs'][0] + break + return uri + def get_sessions(self): result = {} # listing all users has always been slower than other operations, why? @@ -1284,8 +1644,8 @@ class RedfishUtils(object): result['ret'] = True data = response['data'] - for sessions in data[u'Members']: - session_list.append(sessions[u'@odata.id']) # session_list[] are URIs + for sessions in data['Members']: + session_list.append(sessions['@odata.id']) # session_list[] are URIs # for each session, get details for uri in session_list: @@ -1314,8 +1674,8 @@ class RedfishUtils(object): return {'ret': True, 'changed': False, 'msg': "There are no active sessions"} # loop to delete every active session - for session in data[u'Members']: - response = self.delete_request(self.root_uri + session[u'@odata.id']) + for session in data['Members']: + response = self.delete_request(self.root_uri + session['@odata.id']) if response['ret'] is False: return response @@ -1381,6 +1741,8 @@ class RedfishUtils(object): data = response['data'] + result['multipart_supported'] = 'MultipartHttpPushUri' in data + if "Actions" in data: actions = data['Actions'] if len(actions) > 0: @@ -1400,29 +1762,37 @@ class RedfishUtils(object): def _software_inventory(self, uri): result = {} - response = self.get_request(self.root_uri + uri) - if response['ret'] is False: - return response - result['ret'] = True - data = response['data'] - result['entries'] = [] - for member in data[u'Members']: - uri = self.root_uri + member[u'@odata.id'] - # Get details for each software or firmware member - response = self.get_request(uri) + + while uri: + response = self.get_request(self.root_uri + uri) if response['ret'] is False: return response result['ret'] = True + data = response['data'] - 
software = {} - # Get these standard properties if present - for key in ['Name', 'Id', 'Status', 'Version', 'Updateable', - 'SoftwareId', 'LowestSupportedVersion', 'Manufacturer', - 'ReleaseDate']: - if key in data: - software[key] = data.get(key) - result['entries'].append(software) + if data.get('Members@odata.nextLink'): + uri = data.get('Members@odata.nextLink') + else: + uri = None + + for member in data['Members']: + fw_uri = self.root_uri + member['@odata.id'] + # Get details for each software or firmware member + response = self.get_request(fw_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + software = {} + # Get these standard properties if present + for key in ['Name', 'Id', 'Status', 'Version', 'Updateable', + 'SoftwareId', 'LowestSupportedVersion', 'Manufacturer', + 'ReleaseDate']: + if key in data: + software[key] = data.get(key) + result['entries'].append(software) + return result def get_firmware_inventory(self): @@ -1471,7 +1841,7 @@ class RedfishUtils(object): operation_results['status'] = data.get('TaskState', data.get('JobState')) operation_results['messages'] = data.get('Messages', []) else: - # Error response body, which is a bit of a misnomer since it's used in successful action responses + # Error response body, which is a bit of a misnomer since it is used in successful action responses operation_results['status'] = 'Completed' if response.status >= 400: operation_results['status'] = 'Exception' @@ -1490,7 +1860,10 @@ class RedfishUtils(object): # Scan the messages to see if next steps are needed for message in operation_results['messages']: - message_id = message['MessageId'] + message_id = message.get('MessageId') + if message_id is None: + # While this is invalid, treat the lack of a MessageId as "no message" + continue if message_id.startswith('Update.1.') and message_id.endswith('.OperationTransitionedToJob'): # Operation rerouted to a job; update the status and handle @@ -1572,6 
+1945,72 @@ class RedfishUtils(object): 'msg': "SimpleUpdate requested", 'update_status': self._operation_results(response['resp'], response['data'])} + def multipath_http_push_update(self, update_opts): + """ + Provides a software update via the URI specified by the + MultipartHttpPushUri property. Callers should adjust the 'timeout' + variable in the base object to accommodate the size of the image and + speed of the transfer. For example, a 200MB image will likely take + more than the default 10 second timeout. + + :param update_opts: The parameters for the update operation + :return: dict containing the response of the update request + """ + image_file = update_opts.get('update_image_file') + targets = update_opts.get('update_targets') + apply_time = update_opts.get('update_apply_time') + oem_params = update_opts.get('update_oem_params') + custom_oem_header = update_opts.get('update_custom_oem_header') + custom_oem_mime_type = update_opts.get('update_custom_oem_mime_type') + custom_oem_params = update_opts.get('update_custom_oem_params') + + # Ensure the image file is provided + if not image_file: + return {'ret': False, 'msg': + 'Must specify update_image_file for the MultipartHTTPPushUpdate command'} + if not os.path.isfile(image_file): + return {'ret': False, 'msg': + 'Must specify a valid file for the MultipartHTTPPushUpdate command'} + try: + with open(image_file, 'rb') as f: + image_payload = f.read() + except Exception as e: + return {'ret': False, 'msg': + 'Could not read file %s' % image_file} + + # Check that multipart HTTP push updates are supported + response = self.get_request(self.root_uri + self.update_uri) + if response['ret'] is False: + return response + data = response['data'] + if 'MultipartHttpPushUri' not in data: + return {'ret': False, 'msg': 'Service does not support MultipartHttpPushUri'} + update_uri = data['MultipartHttpPushUri'] + + # Assemble the JSON payload portion of the request + payload = {} + if targets: + payload["Targets"] 
= targets + if apply_time: + payload["@Redfish.OperationApplyTime"] = apply_time + if oem_params: + payload["Oem"] = oem_params + multipart_payload = { + 'UpdateParameters': {'content': json.dumps(payload), 'mime_type': 'application/json'}, + 'UpdateFile': {'filename': image_file, 'content': image_payload, 'mime_type': 'application/octet-stream'} + } + if custom_oem_params: + multipart_payload[custom_oem_header] = {'content': custom_oem_params} + if custom_oem_mime_type: + multipart_payload[custom_oem_header]['mime_type'] = custom_oem_mime_type + + response = self.post_request(self.root_uri + update_uri, multipart_payload, multipart=True) + if response['ret'] is False: + return response + return {'ret': True, 'changed': True, + 'msg': "MultipartHTTPPushUpdate requested", + 'update_status': self._operation_results(response['resp'], response['data'])} + def get_update_status(self, update_handle): """ Gets the status of an update operation. @@ -1584,7 +2023,7 @@ class RedfishUtils(object): return {'ret': False, 'msg': 'Must provide a handle tracking the update.'} # Get the task or job tracking the update - response = self.get_request(self.root_uri + update_handle) + response = self.get_request(self.root_uri + update_handle, allow_no_resp=True) if response['ret'] is False: return response @@ -1645,7 +2084,7 @@ class RedfishUtils(object): return response result['ret'] = True data = response['data'] - for attribute in data[u'Attributes'].items(): + for attribute in data['Attributes'].items(): bios_attributes[attribute[0]] = attribute[1] result["entries"] = bios_attributes return result @@ -1901,14 +2340,14 @@ class RedfishUtils(object): # Check the attributes for attr_name, attr_value in attributes.items(): # Check if attribute exists - if attr_name not in data[u'Attributes']: + if attr_name not in data['Attributes']: # Remove and proceed to next attribute if this isn't valid attrs_bad.update({attr_name: attr_value}) del attrs_to_patch[attr_name] continue # If already 
set to requested value, remove it from PATCH payload - if data[u'Attributes'][attr_name] == attributes[attr_name]: + if data['Attributes'][attr_name] == attr_value: del attrs_to_patch[attr_name] warning = "" @@ -1928,11 +2367,19 @@ class RedfishUtils(object): # Construct payload and issue PATCH command payload = {"Attributes": attrs_to_patch} + + # WORKAROUND + # Dell systems require manually setting the apply time to "OnReset" + # to spawn a proprietary job to apply the BIOS settings + vendor = self._get_vendor()['Vendor'] + if vendor == 'Dell': + payload.update({"@Redfish.SettingsApplyTime": {"ApplyTime": "OnReset"}}) + response = self.patch_request(self.root_uri + set_bios_attr_uri, payload) if response['ret'] is False: return response return {'ret': True, 'changed': True, - 'msg': "Modified BIOS attributes %s" % (attrs_to_patch), + 'msg': "Modified BIOS attributes %s. A reboot is required" % (attrs_to_patch), 'warning': warning} def set_boot_order(self, boot_list): @@ -2045,8 +2492,8 @@ class RedfishUtils(object): data = response['data'] # Checking if fans are present - if u'Fans' in data: - for device in data[u'Fans']: + if 'Fans' in data: + for device in data['Fans']: fan = {} for property in properties: if property in device: @@ -2121,7 +2568,7 @@ class RedfishUtils(object): result['ret'] = True data = response['data'] if "Temperatures" in data: - for sensor in data[u'Temperatures']: + for sensor in data['Temperatures']: sensor_result = {} for property in properties: if property in sensor: @@ -2142,7 +2589,7 @@ class RedfishUtils(object): key = "Processors" # Get these entries, but does not fail if not found properties = ['Id', 'Name', 'Manufacturer', 'Model', 'MaxSpeedMHz', - 'TotalCores', 'TotalThreads', 'Status'] + 'ProcessorArchitecture', 'TotalCores', 'TotalThreads', 'Status'] # Search for 'key' entry and extract URI from it response = self.get_request(self.root_uri + systems_uri) @@ -2163,8 +2610,8 @@ class RedfishUtils(object): result['ret'] = True 
data = response['data'] - for cpu in data[u'Members']: - cpu_list.append(cpu[u'@odata.id']) + for cpu in data['Members']: + cpu_list.append(cpu['@odata.id']) for c in cpu_list: cpu = {} @@ -2213,8 +2660,8 @@ class RedfishUtils(object): result['ret'] = True data = response['data'] - for dimm in data[u'Members']: - memory_list.append(dimm[u'@odata.id']) + for dimm in data['Members']: + memory_list.append(dimm['@odata.id']) for m in memory_list: dimm = {} @@ -2246,7 +2693,7 @@ class RedfishUtils(object): result = {} properties = ['Name', 'Id', 'Description', 'FQDN', 'IPv4Addresses', 'IPv6Addresses', 'NameServers', 'MACAddress', 'PermanentMACAddress', - 'SpeedMbps', 'MTUSize', 'AutoNeg', 'Status'] + 'SpeedMbps', 'MTUSize', 'AutoNeg', 'Status', 'LinkStatus'] response = self.get_request(self.root_uri + resource_uri) if response['ret'] is False: return response @@ -2283,8 +2730,8 @@ class RedfishUtils(object): result['ret'] = True data = response['data'] - for nic in data[u'Members']: - nic_list.append(nic[u'@odata.id']) + for nic in data['Members']: + nic_list.append(nic['@odata.id']) for n in nic_list: nic = self.get_nic(n) @@ -2339,8 +2786,8 @@ class RedfishUtils(object): result['ret'] = True data = response['data'] - for virtualmedia in data[u'Members']: - virtualmedia_list.append(virtualmedia[u'@odata.id']) + for virtualmedia in data['Members']: + virtualmedia_list.append(virtualmedia['@odata.id']) for n in virtualmedia_list: virtualmedia = {} @@ -2446,9 +2893,11 @@ class RedfishUtils(object): def virtual_media_insert_via_patch(self, options, param_map, uri, data, image_only=False): # get AllowableValues - ai = dict((k[:-24], - {'AllowableValues': v}) for k, v in data.items() - if k.endswith('@Redfish.AllowableValues')) + ai = { + k[:-24]: {'AllowableValues': v} + for k, v in data.items() + if k.endswith('@Redfish.AllowableValues') + } # construct payload payload = self._insert_virt_media_payload(options, param_map, data, ai) if 'Inserted' not in payload and not 
image_only: @@ -2504,8 +2953,8 @@ class RedfishUtils(object): return response data = response['data'] virt_media_list = [] - for member in data[u'Members']: - virt_media_list.append(member[u'@odata.id']) + for member in data['Members']: + virt_media_list.append(member['@odata.id']) resources, headers = self._read_virt_media_resources(virt_media_list) # see if image already inserted; if so, nothing to do @@ -2619,8 +3068,8 @@ class RedfishUtils(object): return response data = response['data'] virt_media_list = [] - for member in data[u'Members']: - virt_media_list.append(member[u'@odata.id']) + for member in data['Members']: + virt_media_list.append(member['@odata.id']) resources, headers = self._read_virt_media_resources(virt_media_list) # find the VirtualMedia resource to eject @@ -2678,8 +3127,7 @@ class RedfishUtils(object): # Get a list of all Chassis and build URIs, then get all PowerSupplies # from each Power entry in the Chassis - chassis_uri_list = self.chassis_uris - for chassis_uri in chassis_uri_list: + for chassis_uri in self.chassis_uris: response = self.get_request(self.root_uri + chassis_uri) if response['ret'] is False: return response @@ -2688,7 +3136,7 @@ class RedfishUtils(object): data = response['data'] if 'Power' in data: - power_uri = data[u'Power'][u'@odata.id'] + power_uri = data['Power']['@odata.id'] else: continue @@ -2726,7 +3174,7 @@ class RedfishUtils(object): result = {} inventory = {} # Get these entries, but does not fail if not found - properties = ['Status', 'HostName', 'PowerState', 'Model', 'Manufacturer', + properties = ['Status', 'HostName', 'PowerState', 'BootProgress', 'Model', 'Manufacturer', 'PartNumber', 'SystemType', 'AssetTag', 'ServiceTag', 'SerialNumber', 'SKU', 'BiosVersion', 'MemorySummary', 'ProcessorSummary', 'TrustedModules', 'Name', 'Id'] @@ -3040,7 +3488,7 @@ class RedfishUtils(object): # Capture list of URIs that match a specified HostInterface resource Id if hostinterface_id: - matching_hostinterface_uris = 
[uri for uri in uris if hostinterface_id in uri.split('/')[-1]] + matching_hostinterface_uris = [uri for uri in uris if hostinterface_id in uri.rstrip('/').split('/')[-1]] if hostinterface_id and matching_hostinterface_uris: hostinterface_uri = list.pop(matching_hostinterface_uris) elif hostinterface_id and not matching_hostinterface_uris: @@ -3135,8 +3583,9 @@ class RedfishUtils(object): result = {} inventory = {} # Get these entries, but does not fail if not found - properties = ['FirmwareVersion', 'ManagerType', 'Manufacturer', 'Model', - 'PartNumber', 'PowerState', 'SerialNumber', 'Status', 'UUID'] + properties = ['Id', 'FirmwareVersion', 'ManagerType', 'Manufacturer', 'Model', + 'PartNumber', 'PowerState', 'SerialNumber', 'ServiceIdentification', + 'Status', 'UUID'] response = self.get_request(self.root_uri + manager_uri) if response['ret'] is False: @@ -3154,6 +3603,35 @@ class RedfishUtils(object): def get_multi_manager_inventory(self): return self.aggregate_managers(self.get_manager_inventory) + def get_service_identification(self, manager): + result = {} + if manager is None: + if len(self.manager_uris) == 1: + manager = self.manager_uris[0].rstrip('/').split('/')[-1] + elif len(self.manager_uris) > 1: + entries = self.get_multi_manager_inventory()['entries'] + managers = [m[0]['manager_uri'] for m in entries if m[1].get('ServiceIdentification')] + if len(managers) == 1: + manager = managers[0].rstrip('/').split('/')[-1] + else: + self.module.fail_json(msg=[ + "Multiple managers with ServiceIdentification were found: %s" % str(managers), + "Please specify by using the 'manager' parameter in your playbook"]) + elif len(self.manager_uris) == 0: + self.module.fail_json(msg="No manager identities were found") + response = self.get_request(self.root_uri + '/redfish/v1/Managers/' + manager, override_headers=None) + try: + result['service_identification'] = response['data']['ServiceIdentification'] + except Exception as e: + self.module.fail_json(msg="Service ID 
not found for manager %s" % manager) + result['ret'] = True + return result + + def set_service_identification(self, service_id): + data = {"ServiceIdentification": service_id} + resp = self.patch_request(self.root_uri + '/redfish/v1/Managers/' + self.resource_id, data, check_pyld=True) + return resp + def set_session_service(self, sessions_config): if sessions_config is None: return {'ret': False, 'msg': @@ -3163,3 +3641,376 @@ class RedfishUtils(object): if resp['ret'] and resp['changed']: resp['msg'] = 'Modified session service' return resp + + def verify_bios_attributes(self, bios_attributes): + # This method verifies BIOS attributes against the provided input + server_bios = self.get_bios_attributes(self.systems_uri) + if server_bios["ret"] is False: + return server_bios + + bios_dict = {} + wrong_param = {} + + # Verify bios_attributes with BIOS settings available in the server + for key, value in bios_attributes.items(): + if key in server_bios["entries"]: + if server_bios["entries"][key] != value: + bios_dict.update({key: value}) + else: + wrong_param.update({key: value}) + + if wrong_param: + return { + "ret": False, + "msg": "Wrong parameters are provided: %s" % wrong_param + } + + if bios_dict: + return { + "ret": False, + "msg": "BIOS parameters are not matching: %s" % bios_dict + } + + return { + "ret": True, + "changed": False, + "msg": "BIOS verification completed" + } + + def enable_secure_boot(self): + # This function enable Secure Boot on an OOB controller + + response = self.get_request(self.root_uri + self.systems_uri) + if response["ret"] is False: + return response + + server_details = response["data"] + secure_boot_url = server_details["SecureBoot"]["@odata.id"] + + response = self.get_request(self.root_uri + secure_boot_url) + if response["ret"] is False: + return response + + body = {} + body["SecureBootEnable"] = True + + return self.patch_request(self.root_uri + secure_boot_url, body, check_pyld=True) + + def set_secure_boot(self, 
secure_boot_enable): + # This function enable Secure Boot on an OOB controller + + response = self.get_request(self.root_uri + self.systems_uri) + if response["ret"] is False: + return response + + server_details = response["data"] + secure_boot_url = server_details["SecureBoot"]["@odata.id"] + + response = self.get_request(self.root_uri + secure_boot_url) + if response["ret"] is False: + return response + + body = {} + body["SecureBootEnable"] = secure_boot_enable + + return self.patch_request(self.root_uri + secure_boot_url, body, check_pyld=True) + + def get_hpe_thermal_config(self): + result = {} + key = "Thermal" + # Go through list + for chassis_uri in self.chassis_uris: + response = self.get_request(self.root_uri + chassis_uri) + if response['ret'] is False: + return response + result['ret'] = True + data = response['data'] + val = data.get('Oem', {}).get('Hpe', {}).get('ThermalConfiguration') + if val is not None: + return {"ret": True, "current_thermal_config": val} + return {"ret": False} + + def get_hpe_fan_percent_min(self): + result = {} + key = "Thermal" + # Go through list + for chassis_uri in self.chassis_uris: + response = self.get_request(self.root_uri + chassis_uri) + if response['ret'] is False: + return response + data = response['data'] + val = data.get('Oem', {}).get('Hpe', {}).get('FanPercentMinimum') + if val is not None: + return {"ret": True, "fan_percent_min": val} + return {"ret": False} + + def delete_volumes(self, storage_subsystem_id, volume_ids): + # Find the Storage resource from the requested ComputerSystem resource + response = self.get_request(self.root_uri + self.systems_uri) + if response['ret'] is False: + return response + data = response['data'] + storage_uri = data.get('Storage', {}).get('@odata.id') + if storage_uri is None: + return {'ret': False, 'msg': 'Storage resource not found'} + + # Get Storage Collection + response = self.get_request(self.root_uri + storage_uri) + if response['ret'] is False: + return response + 
data = response['data'] + + # Collect Storage Subsystems + self.storage_subsystems_uris = [i['@odata.id'] for i in response['data'].get('Members', [])] + if not self.storage_subsystems_uris: + return { + 'ret': False, + 'msg': "StorageCollection's Members array is either empty or missing"} + + # Matching Storage Subsystem ID with user input + self.storage_subsystem_uri = "" + for storage_subsystem_uri in self.storage_subsystems_uris: + if storage_subsystem_uri.rstrip('/').split('/')[-1] == storage_subsystem_id: + self.storage_subsystem_uri = storage_subsystem_uri + + if not self.storage_subsystem_uri: + return { + 'ret': False, + 'msg': "Provided Storage Subsystem ID %s does not exist on the server" % storage_subsystem_id} + + # Get Volume Collection + response = self.get_request(self.root_uri + self.storage_subsystem_uri) + if response['ret'] is False: + return response + data = response['data'] + + response = self.get_request(self.root_uri + data['Volumes']['@odata.id']) + if response['ret'] is False: + return response + data = response['data'] + + # Collect Volumes + self.volume_uris = [i['@odata.id'] for i in response['data'].get('Members', [])] + if not self.volume_uris: + return { + 'ret': True, 'changed': False, + 'msg': "VolumeCollection's Members array is either empty or missing"} + + # Delete each volume + for volume in self.volume_uris: + if volume.rstrip('/').split('/')[-1] in volume_ids: + response = self.delete_request(self.root_uri + volume) + if response['ret'] is False: + return response + + return {'ret': True, 'changed': True, + 'msg': "The following volumes were deleted: %s" % str(volume_ids)} + + def create_volume(self, volume_details, storage_subsystem_id, storage_none_volume_deletion=False): + # Find the Storage resource from the requested ComputerSystem resource + response = self.get_request(self.root_uri + self.systems_uri) + if response['ret'] is False: + return response + data = response['data'] + storage_uri = data.get('Storage', 
{}).get('@odata.id') + if storage_uri is None: + return {'ret': False, 'msg': 'Storage resource not found'} + + # Get Storage Collection + response = self.get_request(self.root_uri + storage_uri) + if response['ret'] is False: + return response + data = response['data'] + + # Collect Storage Subsystems + self.storage_subsystems_uris = [i['@odata.id'] for i in response['data'].get('Members', [])] + if not self.storage_subsystems_uris: + return { + 'ret': False, + 'msg': "StorageCollection's Members array is either empty or missing"} + + # Matching Storage Subsystem ID with user input + self.storage_subsystem_uri = "" + for storage_subsystem_uri in self.storage_subsystems_uris: + if storage_subsystem_uri.rstrip('/').split('/')[-1] == storage_subsystem_id: + self.storage_subsystem_uri = storage_subsystem_uri + + if not self.storage_subsystem_uri: + return { + 'ret': False, + 'msg': "Provided Storage Subsystem ID %s does not exist on the server" % storage_subsystem_id} + + # Validate input parameters + required_parameters = ['RAIDType', 'Drives'] + allowed_parameters = ['CapacityBytes', 'DisplayName', 'InitializeMethod', 'MediaSpanCount', + 'Name', 'ReadCachePolicy', 'StripSizeBytes', 'VolumeUsage', 'WriteCachePolicy'] + + for parameter in required_parameters: + if not volume_details.get(parameter): + return { + 'ret': False, + 'msg': "%s are required parameter to create a volume" % str(required_parameters)} + + # Navigate to the volume uri of the correct storage subsystem + response = self.get_request(self.root_uri + self.storage_subsystem_uri) + if response['ret'] is False: + return response + data = response['data'] + + # Deleting any volumes of RAIDType None present on the Storage Subsystem + if storage_none_volume_deletion: + response = self.get_request(self.root_uri + data['Volumes']['@odata.id']) + if response['ret'] is False: + return response + volume_data = response['data'] + + if "Members" in volume_data: + for member in volume_data["Members"]: + response = 
self.get_request(self.root_uri + member['@odata.id']) + if response['ret'] is False: + return response + member_data = response['data'] + + if member_data["RAIDType"] == "None": + response = self.delete_request(self.root_uri + member['@odata.id']) + if response['ret'] is False: + return response + + # Construct payload and issue POST command to create volume + volume_details["Links"] = {} + volume_details["Links"]["Drives"] = [] + for drive in volume_details["Drives"]: + volume_details["Links"]["Drives"].append({"@odata.id": drive}) + del volume_details["Drives"] + payload = volume_details + response = self.post_request(self.root_uri + data['Volumes']['@odata.id'], payload) + if response['ret'] is False: + return response + + return {'ret': True, 'changed': True, + 'msg': "Volume Created"} + + def get_bios_registries(self): + # Get /redfish/v1 + response = self.get_request(self.root_uri + self.systems_uri) + if not response["ret"]: + return response + + server_details = response["data"] + + # Get Registries URI + if "Bios" not in server_details: + msg = "Getting BIOS URI failed, Key 'Bios' not found in /redfish/v1/Systems/1/ response: %s" + return { + "ret": False, + "msg": msg % str(server_details) + } + + bios_uri = server_details["Bios"]["@odata.id"] + bios_resp = self.get_request(self.root_uri + bios_uri) + if not bios_resp["ret"]: + return bios_resp + + bios_data = bios_resp["data"] + attribute_registry = bios_data["AttributeRegistry"] + + reg_uri = self.root_uri + self.service_root + "Registries/" + attribute_registry + reg_resp = self.get_request(reg_uri) + if not reg_resp["ret"]: + return reg_resp + + reg_data = reg_resp["data"] + + # Get BIOS attribute registry URI + lst = [] + + # Get the location URI + response = self.check_location_uri(reg_data, reg_uri) + if not response["ret"]: + return response + + rsp_data, rsp_uri = response["rsp_data"], response["rsp_uri"] + + if "RegistryEntries" not in rsp_data: + return { + "msg": "'RegistryEntries' not present 
in %s response, %s" % (rsp_uri, str(rsp_data)), + "ret": False + } + + return { + "bios_registry": rsp_data, + "bios_registry_uri": rsp_uri, + "ret": True + } + + def check_location_uri(self, resp_data, resp_uri): + # Get the location URI response + # return {"msg": self.creds, "ret": False} + vendor = self._get_vendor()['Vendor'] + rsp_uri = "" + for loc in resp_data['Location']: + if loc['Language'].startswith("en"): + rsp_uri = loc['Uri'] + if vendor == 'HPE': + # WORKAROUND + # HPE systems with iLO 4 will have BIOS Attribute Registries location URI as a dictionary with key 'extref' + # Hence adding condition to fetch the Uri + if isinstance(loc['Uri'], dict) and "extref" in loc['Uri'].keys(): + rsp_uri = loc['Uri']['extref'] + if not rsp_uri: + msg = "Language 'en' not found in BIOS Attribute Registries location, URI: %s, response: %s" + return { + "ret": False, + "msg": msg % (resp_uri, str(resp_data)) + } + + res = self.get_request(self.root_uri + rsp_uri) + if res['ret'] is False: + # WORKAROUND + # HPE systems with iLO 4 or iLO5 compresses (gzip) for some URIs + # Hence adding encoding to the header + if vendor == 'HPE': + override_headers = {"Accept-Encoding": "gzip"} + res = self.get_request(self.root_uri + rsp_uri, override_headers=override_headers) + if res['ret']: + return { + "ret": True, + "rsp_data": res["data"], + "rsp_uri": rsp_uri + } + return res + + def get_accountservice_properties(self): + # Find the AccountService resource + response = self.get_request(self.root_uri + self.service_root) + if response['ret'] is False: + return response + data = response['data'] + accountservice_uri = data.get("AccountService", {}).get("@odata.id") + if accountservice_uri is None: + return {'ret': False, 'msg': "AccountService resource not found"} + + response = self.get_request(self.root_uri + accountservice_uri) + if response['ret'] is False: + return response + return { + 'ret': True, + 'entries': response['data'] + } + + def get_power_restore_policy(self, 
systems_uri): + # Retrieve System resource + response = self.get_request(self.root_uri + systems_uri) + if response['ret'] is False: + return response + return { + 'ret': True, + 'entries': response['data']['PowerRestorePolicy'] + } + + def get_multi_power_restore_policy(self): + return self.aggregate_systems(self.get_power_restore_policy) + + def set_power_restore_policy(self, policy): + body = {'PowerRestorePolicy': policy} + return self.patch_request(self.root_uri + self.systems_uri, body, check_pyld=True) diff --git a/plugins/module_utils/redhat.py b/plugins/module_utils/redhat.py deleted file mode 100644 index f82cffaa0d..0000000000 --- a/plugins/module_utils/redhat.py +++ /dev/null @@ -1,272 +0,0 @@ -# -*- coding: utf-8 -*- -# This code is part of Ansible, but is an independent component. -# This particular file snippet, and this file snippet only, is BSD licensed. -# Modules you write using this snippet, which is embedded dynamically by Ansible -# still belong to the author of the module, and may assign their own license -# to the complete work. 
-# -# Copyright (c), James Laska -# -# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) -# SPDX-License-Identifier: BSD-2-Clause - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -import os -import re -import shutil -import tempfile -import types - -from ansible.module_utils.six.moves import configparser - - -class RegistrationBase(object): - def __init__(self, module, username=None, password=None): - self.module = module - self.username = username - self.password = password - - def configure(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def enable(self): - # Remove any existing redhat.repo - redhat_repo = '/etc/yum.repos.d/redhat.repo' - if os.path.isfile(redhat_repo): - os.unlink(redhat_repo) - - def register(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def unregister(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def unsubscribe(self): - raise NotImplementedError("Must be implemented by a sub-class") - - def update_plugin_conf(self, plugin, enabled=True): - plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin - - if os.path.isfile(plugin_conf): - tmpfd, tmpfile = tempfile.mkstemp() - shutil.copy2(plugin_conf, tmpfile) - cfg = configparser.ConfigParser() - cfg.read([tmpfile]) - - if enabled: - cfg.set('main', 'enabled', 1) - else: - cfg.set('main', 'enabled', 0) - - fd = open(tmpfile, 'w+') - cfg.write(fd) - fd.close() - self.module.atomic_move(tmpfile, plugin_conf) - - def subscribe(self, **kwargs): - raise NotImplementedError("Must be implemented by a sub-class") - - -class Rhsm(RegistrationBase): - def __init__(self, module, username=None, password=None): - RegistrationBase.__init__(self, module, username, password) - self.config = self._read_config() - self.module = module - - def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'): - ''' - Load RHSM configuration 
from /etc/rhsm/rhsm.conf. - Returns: - * ConfigParser object - ''' - - # Read RHSM defaults ... - cp = configparser.ConfigParser() - cp.read(rhsm_conf) - - # Add support for specifying a default value w/o having to standup some configuration - # Yeah, I know this should be subclassed ... but, oh well - def get_option_default(self, key, default=''): - sect, opt = key.split('.', 1) - if self.has_section(sect) and self.has_option(sect, opt): - return self.get(sect, opt) - else: - return default - - cp.get_option = types.MethodType(get_option_default, cp, configparser.ConfigParser) - - return cp - - def enable(self): - ''' - Enable the system to receive updates from subscription-manager. - This involves updating affected yum plugins and removing any - conflicting yum repositories. - ''' - RegistrationBase.enable(self) - self.update_plugin_conf('rhnplugin', False) - self.update_plugin_conf('subscription-manager', True) - - def configure(self, **kwargs): - ''' - Configure the system as directed for registration with RHN - Raises: - * Exception - if error occurs while running command - ''' - args = ['subscription-manager', 'config'] - - # Pass supplied **kwargs as parameters to subscription-manager. Ignore - # non-configuration parameters and replace '_' with '.'. For example, - # 'server_hostname' becomes '--system.hostname'. - for k, v in kwargs.items(): - if re.search(r'^(system|rhsm)_', k): - args.append('--%s=%s' % (k.replace('_', '.'), v)) - - self.module.run_command(args, check_rc=True) - - @property - def is_registered(self): - ''' - Determine whether the current system - Returns: - * Boolean - whether the current system is currently registered to - RHN. 
- ''' - args = ['subscription-manager', 'identity'] - rc, stdout, stderr = self.module.run_command(args, check_rc=False) - if rc == 0: - return True - else: - return False - - def register(self, username, password, autosubscribe, activationkey): - ''' - Register the current system to the provided RHN server - Raises: - * Exception - if error occurs while running command - ''' - args = ['subscription-manager', 'register'] - - # Generate command arguments - if activationkey: - args.append('--activationkey "%s"' % activationkey) - else: - if autosubscribe: - args.append('--autosubscribe') - if username: - args.extend(['--username', username]) - if password: - args.extend(['--password', password]) - - # Do the needful... - rc, stderr, stdout = self.module.run_command(args, check_rc=True) - - def unsubscribe(self): - ''' - Unsubscribe a system from all subscribed channels - Raises: - * Exception - if error occurs while running command - ''' - args = ['subscription-manager', 'unsubscribe', '--all'] - rc, stderr, stdout = self.module.run_command(args, check_rc=True) - - def unregister(self): - ''' - Unregister a currently registered system - Raises: - * Exception - if error occurs while running command - ''' - args = ['subscription-manager', 'unregister'] - rc, stderr, stdout = self.module.run_command(args, check_rc=True) - self.update_plugin_conf('rhnplugin', False) - self.update_plugin_conf('subscription-manager', False) - - def subscribe(self, regexp): - ''' - Subscribe current system to available pools matching the specified - regular expression - Raises: - * Exception - if error occurs while running command - ''' - - # Available pools ready for subscription - available_pools = RhsmPools(self.module) - - for pool in available_pools.filter(regexp): - pool.subscribe() - - -class RhsmPool(object): - ''' - Convenience class for housing subscription information - ''' - - def __init__(self, module, **kwargs): - self.module = module - for k, v in kwargs.items(): - 
setattr(self, k, v) - - def __str__(self): - return str(self.__getattribute__('_name')) - - def subscribe(self): - args = "subscription-manager subscribe --pool %s" % self.PoolId - rc, stdout, stderr = self.module.run_command(args, check_rc=True) - if rc == 0: - return True - else: - return False - - -class RhsmPools(object): - """ - This class is used for manipulating pools subscriptions with RHSM - """ - def __init__(self, module): - self.module = module - self.products = self._load_product_list() - - def __iter__(self): - return self.products.__iter__() - - def _load_product_list(self): - """ - Loads list of all available pools for system in data structure - """ - args = "subscription-manager list --available" - rc, stdout, stderr = self.module.run_command(args, check_rc=True) - - products = [] - for line in stdout.split('\n'): - # Remove leading+trailing whitespace - line = line.strip() - # An empty line implies the end of an output group - if len(line) == 0: - continue - # If a colon ':' is found, parse - elif ':' in line: - (key, value) = line.split(':', 1) - key = key.strip().replace(" ", "") # To unify - value = value.strip() - if key in ['ProductName', 'SubscriptionName']: - # Remember the name for later processing - products.append(RhsmPool(self.module, _name=value, key=value)) - elif products: - # Associate value with most recently recorded product - products[-1].__setattr__(key, value) - # FIXME - log some warning? 
- # else: - # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value)) - return products - - def filter(self, regexp='^$'): - ''' - Return a list of RhsmPools whose name matches the provided regular expression - ''' - r = re.compile(regexp) - for product in self.products: - if r.search(product._name): - yield product diff --git a/plugins/module_utils/redis.py b/plugins/module_utils/redis.py index c4d87aca51..d3de8e63e9 100644 --- a/plugins/module_utils/redis.py +++ b/plugins/module_utils/redis.py @@ -1,13 +1,11 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2021, Andreas Botzner # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations from ansible.module_utils.basic import missing_required_lib -__metaclass__ = type import traceback @@ -57,7 +55,9 @@ def redis_auth_argument_spec(tls_default=True): validate_certs=dict(type='bool', default=True ), - ca_certs=dict(type='str') + ca_certs=dict(type='str'), + client_cert_file=dict(type='str'), + client_key_file=dict(type='str'), ) @@ -71,6 +71,8 @@ def redis_auth_params(module): ca_certs = module.params['ca_certs'] if tls and ca_certs is None: ca_certs = str(certifi.where()) + client_cert_file = module.params['client_cert_file'] + client_key_file = module.params['client_key_file'] if tuple(map(int, redis_version.split('.'))) < (3, 4, 0) and login_user is not None: module.fail_json( msg='The option `username` in only supported with redis >= 3.4.0.') @@ -78,6 +80,8 @@ def redis_auth_params(module): 'port': login_port, 'password': login_password, 'ssl_ca_certs': ca_certs, + 'ssl_certfile': client_cert_file, + 'ssl_keyfile': client_key_file, 'ssl_cert_reqs': validate_certs, 'ssl': tls} if login_user is not None: @@ -96,5 +100,5 @@ class RedisAnsible(object): try: return Redis(**redis_auth_params(self.module)) 
except Exception as e: - self.module.fail_json(msg='{0}'.format(str(e))) + self.module.fail_json(msg=f'{e}') return None diff --git a/plugins/module_utils/remote_management/lxca/common.py b/plugins/module_utils/remote_management/lxca/common.py index 0fe8c32077..1f06839d39 100644 --- a/plugins/module_utils/remote_management/lxca/common.py +++ b/plugins/module_utils/remote_management/lxca/common.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by @@ -12,8 +11,7 @@ # Contains LXCA common class # Lenovo xClarity Administrator (LXCA) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import traceback try: diff --git a/plugins/module_utils/rundeck.py b/plugins/module_utils/rundeck.py index dd83eeccaf..7b9f56339a 100644 --- a/plugins/module_utils/rundeck.py +++ b/plugins/module_utils/rundeck.py @@ -1,11 +1,9 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2021, Phillipe Smith # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import json @@ -28,7 +26,7 @@ def api_argument_spec(): return api_argument_spec -def api_request(module, endpoint, data=None, method="GET"): +def api_request(module, endpoint, data=None, method="GET", content_type="application/json"): """Manages Rundeck API requests via HTTP(S) :arg module: The AnsibleModule (used to get url, api_version, api_token, etc). 
@@ -55,15 +53,11 @@ def api_request(module, endpoint, data=None, method="GET"): response, info = fetch_url( module=module, - url="%s/api/%s/%s" % ( - module.params["url"], - module.params["api_version"], - endpoint - ), + url=f"{module.params['url']}/api/{module.params['api_version']}/{endpoint}", data=json.dumps(data), method=method, headers={ - "Content-Type": "application/json", + "Content-Type": content_type, "Accept": "application/json", "X-Rundeck-Auth-Token": module.params["api_token"] } @@ -72,7 +66,9 @@ def api_request(module, endpoint, data=None, method="GET"): if info["status"] == 403: module.fail_json(msg="Token authorization failed", execution_info=json.loads(info["body"])) - if info["status"] == 409: + elif info["status"] == 404: + return None, info + elif info["status"] == 409: module.fail_json(msg="Job executions limit reached", execution_info=json.loads(info["body"])) elif info["status"] >= 500: @@ -81,12 +77,18 @@ def api_request(module, endpoint, data=None, method="GET"): try: content = response.read() - json_response = json.loads(content) - return json_response, info + + if not content: + return None, info + else: + json_response = json.loads(content) + return json_response, info except AttributeError as error: - module.fail_json(msg="Rundeck API request error", - exception=to_native(error), - execution_info=info) + module.fail_json( + msg="Rundeck API request error", + exception=to_native(error), + execution_info=info + ) except ValueError as error: module.fail_json( msg="No valid JSON response", diff --git a/plugins/module_utils/saslprep.py b/plugins/module_utils/saslprep.py index 29bb49b702..b02cedd874 100644 --- a/plugins/module_utils/saslprep.py +++ b/plugins/module_utils/saslprep.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
@@ -11,8 +10,7 @@ # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from stringprep import ( in_table_a1, @@ -31,11 +29,9 @@ from stringprep import ( ) from unicodedata import normalize -from ansible.module_utils.six import text_type - def is_unicode_str(string): - return True if isinstance(string, text_type) else False + return True if isinstance(string, str) else False def mapping_profile(string): @@ -54,11 +50,11 @@ def mapping_profile(string): if in_table_c12(c): # map non-ASCII space characters # (that can be mapped) to Unicode space - tmp.append(u' ') + tmp.append(' ') else: tmp.append(c) - return u"".join(tmp) + return "".join(tmp) def is_ral_string(string): @@ -109,35 +105,31 @@ def prohibited_output_profile(string): for c in string: # RFC4013 2.3. Prohibited Output: if in_table_c12(c): - raise ValueError('%s: prohibited non-ASCII space characters ' - 'that cannot be replaced (C.1.2).' % RFC) + raise ValueError(f'{RFC}: prohibited non-ASCII space characters that cannot be replaced (C.1.2).') if in_table_c21_c22(c): - raise ValueError('%s: prohibited control characters (C.2.1).' % RFC) + raise ValueError(f'{RFC}: prohibited control characters (C.2.1).') if in_table_c3(c): - raise ValueError('%s: prohibited private Use characters (C.3).' % RFC) + raise ValueError(f'{RFC}: prohibited private Use characters (C.3).') if in_table_c4(c): - raise ValueError('%s: prohibited non-character code points (C.4).' % RFC) + raise ValueError(f'{RFC}: prohibited non-character code points (C.4).') if in_table_c5(c): - raise ValueError('%s: prohibited surrogate code points (C.5).' % RFC) + raise ValueError(f'{RFC}: prohibited surrogate code points (C.5).') if in_table_c6(c): - raise ValueError('%s: prohibited inappropriate for plain text ' - 'characters (C.6).' 
% RFC) + raise ValueError(f'{RFC}: prohibited inappropriate for plain text characters (C.6).') if in_table_c7(c): - raise ValueError('%s: prohibited inappropriate for canonical ' - 'representation characters (C.7).' % RFC) + raise ValueError(f'{RFC}: prohibited inappropriate for canonical representation characters (C.7).') if in_table_c8(c): - raise ValueError('%s: prohibited change display properties / ' - 'deprecated characters (C.8).' % RFC) + raise ValueError(f'{RFC}: prohibited change display properties / deprecated characters (C.8).') if in_table_c9(c): - raise ValueError('%s: prohibited tagging characters (C.9).' % RFC) + raise ValueError(f'{RFC}: prohibited tagging characters (C.9).') # RFC4013, 2.4. Bidirectional Characters: if is_prohibited_bidi_ch(c): - raise ValueError('%s: prohibited bidi characters (%s).' % (RFC, bidi_table)) + raise ValueError(f'{RFC}: prohibited bidi characters ({bidi_table}).') # RFC4013, 2.5. Unassigned Code Points: if in_table_a1(c): - raise ValueError('%s: prohibited unassigned code points (A.1).' % RFC) + raise ValueError(f'{RFC}: prohibited unassigned code points (A.1).') def saslprep(string): @@ -158,9 +150,8 @@ def saslprep(string): # RFC4013: "The algorithm assumes all strings are # comprised of characters from the Unicode [Unicode] character set." # Validate the string is a Unicode string - # (text_type is the string type if PY3 and unicode otherwise): if not is_unicode_str(string): - raise TypeError('input must be of type %s, not %s' % (text_type, type(string))) + raise TypeError(f'input must be of type str, not {type(string)}') # RFC4013: 2.1. Mapping. string = mapping_profile(string) @@ -169,7 +160,7 @@ def saslprep(string): # "This profile specifies using Unicode normalization form KC." string = normalize('NFKC', string) if not string: - return u'' + return '' # RFC4013: 2.3. Prohibited Output. # RFC4013: 2.4. Bidirectional Characters. 
diff --git a/plugins/module_utils/scaleway.py b/plugins/module_utils/scaleway.py index 43f2094800..0798e61317 100644 --- a/plugins/module_utils/scaleway.py +++ b/plugins/module_utils/scaleway.py @@ -1,10 +1,8 @@ -# -*- coding: utf-8 -*- # Copyright (c) Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import json import re @@ -12,10 +10,14 @@ import sys import datetime import time import traceback +from urllib.parse import urlencode from ansible.module_utils.basic import env_fallback, missing_required_lib from ansible.module_utils.urls import fetch_url -from ansible.module_utils.six.moves.urllib.parse import urlencode + +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) SCALEWAY_SECRET_IMP_ERR = None try: @@ -47,11 +49,11 @@ def scaleway_waitable_resource_argument_spec(): def payload_from_object(scw_object): - return dict( - (k, v) + return { + k: v for k, v in scw_object.items() if k != 'id' and v is not None - ) + } class ScalewayException(Exception): @@ -105,18 +107,12 @@ class SecretVariables(object): @staticmethod def dict_to_list(source_dict): - return [ - dict(key=var[0], value=var[1]) - for var in source_dict.items() - ] + return [dict(key=k, value=v) for k, v in source_dict.items()] @staticmethod def list_to_dict(source_list, hashed=False): key_value = 'hashed_value' if hashed else 'value' - return dict( - (var['key'], var[key_value]) - for var in source_list - ) + return {var['key']: var[key_value] for var in source_list} @classmethod def decode(cls, secrets_list, values_list): @@ -139,7 +135,7 @@ def resource_attributes_should_be_changed(target, wished, verifiable_mutable_att diff[attr] = wished[attr] if diff: - return dict((attr, wished[attr]) for attr in 
mutable_attributes) + return {attr: wished[attr] for attr in mutable_attributes} else: return diff @@ -303,13 +299,13 @@ class Scaleway(object): wait_timeout = self.module.params["wait_timeout"] wait_sleep_time = self.module.params["wait_sleep_time"] - # Prevent requesting the ressource status too soon + # Prevent requesting the resource status too soon time.sleep(wait_sleep_time) - start = datetime.datetime.utcnow() + start = now() end = start + datetime.timedelta(seconds=wait_timeout) - while datetime.datetime.utcnow() < end: + while now() < end: self.module.debug("We are going to wait for the resource to finish its transition") state = self.fetch_state(resource) @@ -352,11 +348,18 @@ SCALEWAY_LOCATION = { 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-2' }, + 'par3': { + 'name': 'Paris 3', + 'country': 'FR', + 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/fr-par-3', + 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/fr-par-3' + }, + 'ams1': { 'name': 'Amsterdam 1', 'country': 'NL', 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/nl-ams-1', - 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-10' + 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-1' }, 'EMEA-NL-EVS': { @@ -366,6 +369,20 @@ SCALEWAY_LOCATION = { 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-1' }, + 'ams2': { + 'name': 'Amsterdam 2', + 'country': 'NL', + 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/nl-ams-2', + 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-2' + }, + + 'ams3': { + 'name': 'Amsterdam 3', + 'country': 'NL', + 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/nl-ams-3', + 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/nl-ams-3' + }, + 'waw1': { 'name': 'Warsaw 1', 'country': 'PL', @@ -379,6 +396,20 @@ SCALEWAY_LOCATION = { 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/pl-waw-1', 'api_endpoint_vpc': 
'https://api.scaleway.com/vpc/v1/zones/pl-waw-1' }, + + 'waw2': { + 'name': 'Warsaw 2', + 'country': 'PL', + 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/pl-waw-2', + 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/pl-waw-2' + }, + + 'waw3': { + 'name': 'Warsaw 3', + 'country': 'PL', + 'api_endpoint': 'https://api.scaleway.com/instance/v1/zones/pl-waw-3', + 'api_endpoint_vpc': 'https://api.scaleway.com/vpc/v1/zones/pl-waw-3' + }, } SCALEWAY_ENDPOINT = "https://api.scaleway.com" @@ -392,6 +423,11 @@ SCALEWAY_REGIONS = [ SCALEWAY_ZONES = [ "fr-par-1", "fr-par-2", + "fr-par-3", "nl-ams-1", + "nl-ams-2", + "nl-ams-3", "pl-waw-1", + "pl-waw-2", + "pl-waw-3", ] diff --git a/plugins/module_utils/snap.py b/plugins/module_utils/snap.py new file mode 100644 index 0000000000..d672a7b519 --- /dev/null +++ b/plugins/module_utils/snap.py @@ -0,0 +1,53 @@ +# Copyright (c) 2023, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +_alias_state_map = dict( + present='alias', + absent='unalias', + info='aliases', +) + +_state_map = dict( + present='install', + absent='remove', + enabled='enable', + disabled='disable', + refresh='refresh', +) + + +def snap_runner(module, **kwargs): + runner = CmdRunner( + module, + "snap", + arg_formats=dict( + state_alias=cmd_runner_fmt.as_map(_alias_state_map), # snap_alias only + name=cmd_runner_fmt.as_list(), + alias=cmd_runner_fmt.as_list(), # snap_alias only + state=cmd_runner_fmt.as_map(_state_map), + _list=cmd_runner_fmt.as_fixed("list"), + _set=cmd_runner_fmt.as_fixed("set"), + get=cmd_runner_fmt.as_fixed(["get", "-d"]), + classic=cmd_runner_fmt.as_bool("--classic"), + channel=cmd_runner_fmt.as_func(lambda v: [] if v == 'stable' else 
['--channel', f'{v}']), + options=cmd_runner_fmt.as_list(), + info=cmd_runner_fmt.as_fixed("info"), + dangerous=cmd_runner_fmt.as_bool("--dangerous"), + version=cmd_runner_fmt.as_fixed("version"), + ), + check_rc=False, + **kwargs + ) + return runner + + +def get_version(runner): + with runner("version") as ctx: + rc, out, err = ctx.run() + return dict(x.split() for x in out.splitlines() if len(x.split()) == 2) diff --git a/plugins/module_utils/source_control/bitbucket.py b/plugins/module_utils/source_control/bitbucket.py index 9a27361830..a3d3fa5f2f 100644 --- a/plugins/module_utils/source_control/bitbucket.py +++ b/plugins/module_utils/source_control/bitbucket.py @@ -1,10 +1,8 @@ -# -*- coding: utf-8 -*- # Copyright (c) Ansible project # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import json @@ -56,14 +54,14 @@ class BitbucketHelper: if info['status'] == 200: self.access_token = content['access_token'] else: - self.module.fail_json(msg='Failed to retrieve access token: {0}'.format(info)) + self.module.fail_json(msg=f'Failed to retrieve access token: {info}') def request(self, api_url, method, data=None, headers=None): headers = headers or {} if self.access_token: headers.update({ - 'Authorization': 'Bearer {0}'.format(self.access_token), + 'Authorization': f'Bearer {self.access_token}', }) elif self.module.params['user'] and self.module.params['password']: headers.update({ diff --git a/plugins/module_utils/ssh.py b/plugins/module_utils/ssh.py index 082839e26d..851efcbe86 100644 --- a/plugins/module_utils/ssh.py +++ b/plugins/module_utils/ssh.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2015, Björn Andersson # Copyright (c) 2021, Ansible Project # Copyright (c) 2021, Abhijeet Kasurde @@ -6,8 +5,7 @@ # GNU General Public License 
v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import os @@ -15,7 +13,7 @@ import os def determine_config_file(user, config_file): if user: - config_file = os.path.join(os.path.expanduser('~%s' % user), '.ssh', 'config') + config_file = os.path.join(os.path.expanduser(f'~{user}'), '.ssh', 'config') elif config_file is None: config_file = '/etc/ssh/ssh_config' return config_file diff --git a/plugins/module_utils/storage/emc/emc_vnx.py b/plugins/module_utils/storage/emc/emc_vnx.py index 2e391b8fbe..b6a4d30463 100644 --- a/plugins/module_utils/storage/emc/emc_vnx.py +++ b/plugins/module_utils/storage/emc/emc_vnx.py @@ -1,10 +1,8 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018 Luca 'remix_tj' Lorenzetto # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations emc_vnx_argument_spec = { diff --git a/plugins/module_utils/storage/hpe3par/hpe3par.py b/plugins/module_utils/storage/hpe3par/hpe3par.py index 3d164ce746..da88db1ce6 100644 --- a/plugins/module_utils/storage/hpe3par/hpe3par.py +++ b/plugins/module_utils/storage/hpe3par/hpe3par.py @@ -1,10 +1,8 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2018, Hewlett Packard Enterprise Development LP # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations from ansible.module_utils import basic @@ -22,7 +20,7 @@ def convert_to_binary_multiple(size_with_unit): if float(size) < 0: return -1 if not valid_unit: 
- raise ValueError("%s does not have a valid unit. The unit must be one of %s" % (size_with_unit, valid_units)) + raise ValueError(f"{size_with_unit} does not have a valid unit. The unit must be one of {valid_units}") size = size_with_unit.replace(" ", "").split('iB')[0] size_kib = basic.human_to_bytes(size) diff --git a/plugins/module_utils/systemd.py b/plugins/module_utils/systemd.py new file mode 100644 index 0000000000..00ce292feb --- /dev/null +++ b/plugins/module_utils/systemd.py @@ -0,0 +1,32 @@ +# Copyright (c) 2025, Marco Noce +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +def systemd_runner(module, command, **kwargs): + arg_formats = dict( + version=cmd_runner_fmt.as_fixed("--version"), + list_units=cmd_runner_fmt.as_fixed(["list-units", "--no-pager"]), + types=cmd_runner_fmt.as_func(lambda v: [] if not v else ["--type", ",".join(v)]), + all=cmd_runner_fmt.as_fixed("--all"), + plain=cmd_runner_fmt.as_fixed("--plain"), + no_legend=cmd_runner_fmt.as_fixed("--no-legend"), + show=cmd_runner_fmt.as_fixed("show"), + props=cmd_runner_fmt.as_func(lambda v: [] if not v else ["-p", ",".join(v)]), + dashdash=cmd_runner_fmt.as_fixed("--"), + unit=cmd_runner_fmt.as_list(), + ) + + runner = CmdRunner( + module, + command=command, + arg_formats=arg_formats, + check_rc=True, + **kwargs + ) + return runner diff --git a/plugins/module_utils/univention_umc.py b/plugins/module_utils/univention_umc.py index b08f39e306..1475a91542 100644 --- a/plugins/module_utils/univention_umc.py +++ b/plugins/module_utils/univention_umc.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
@@ -12,8 +11,7 @@ # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations """Univention Corporate Server (UCS) access module. @@ -91,7 +89,7 @@ def uldap(): def construct(): try: secret_file = open('/etc/ldap.secret', 'r') - bind_dn = 'cn=admin,{0}'.format(base_dn()) + bind_dn = f'cn=admin,{base_dn()}' except IOError: # pragma: no cover secret_file = open('/etc/machine.secret', 'r') bind_dn = config_registry()["ldap/hostdn"] @@ -188,7 +186,7 @@ def module_by_name(module_name_): univention.admin.modules.init(uldap(), position_base_dn(), module) return module - return _singleton('module/%s' % module_name_, construct) + return _singleton(f'module/{module_name_}', construct) def get_umc_admin_objects(): diff --git a/plugins/module_utils/utm_utils.py b/plugins/module_utils/utm_utils.py index 712450cd2a..2e7432fb38 100644 --- a/plugins/module_utils/utm_utils.py +++ b/plugins/module_utils/utm_utils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. 
# Modules you write using this snippet, which is embedded dynamically by Ansible @@ -10,8 +9,7 @@ # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations import json @@ -73,8 +71,9 @@ class UTM: """ self.info_only = info_only self.module = module - self.request_url = module.params.get('utm_protocol') + "://" + module.params.get('utm_host') + ":" + to_native( - module.params.get('utm_port')) + "/api/objects/" + endpoint + "/" + self.request_url = ( + f"{module.params.get('utm_protocol')}://{module.params.get('utm_host')}:{module.params.get('utm_port')}/api/objects/{endpoint}/" + ) """ The change_relevant_keys will be checked for changes to determine whether the object needs to be updated @@ -84,9 +83,8 @@ class UTM: self.module.params['url_password'] = module.params.get('utm_token') if all(elem in self.change_relevant_keys for elem in module.params.keys()): raise UTMModuleConfigurationError( - "The keys " + to_native( - self.change_relevant_keys) + " to check are not in the modules keys:\n" + to_native( - list(module.params.keys()))) + f"The keys {self.change_relevant_keys} to check are not in the modules keys:\n{list(module.params.keys())}" + ) def execute(self): try: @@ -185,7 +183,7 @@ class UTM: result = None if response is not None: results = json.loads(response.read()) - result = next(iter(filter(lambda d: d['name'] == module.params.get('name'), results)), None) + result = next((d for d in results if d['name'] == module.params.get('name')), None) return info, result def _clean_result(self, result): diff --git a/plugins/module_utils/vardict.py b/plugins/module_utils/vardict.py new file mode 100644 index 0000000000..ccea7d5bb6 --- /dev/null +++ b/plugins/module_utils/vardict.py @@ -0,0 +1,196 @@ +# (c) 2023, Alexei Znamensky +# Copyright (c) 2023, 
Ansible Project +# Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) +# SPDX-License-Identifier: BSD-2-Clause + +from __future__ import annotations + +import copy + + +class _Variable(object): + NOTHING = object() + + def __init__(self, diff=False, output=True, change=None, fact=False, verbosity=0): + self.init = False + self.initial_value = None + self.value = None + + self.diff = None + self._change = None + self.output = None + self.fact = None + self._verbosity = None + self.set_meta(output=output, diff=diff, change=change, fact=fact, verbosity=verbosity) + + def getchange(self): + return self.diff if self._change is None else self._change + + def setchange(self, value): + self._change = value + + def getverbosity(self): + return self._verbosity + + def setverbosity(self, v): + if not (0 <= v <= 4): + raise ValueError("verbosity must be an int in the range 0 to 4") + self._verbosity = v + + change = property(getchange, setchange) + verbosity = property(getverbosity, setverbosity) + + def set_meta(self, output=None, diff=None, change=None, fact=None, initial_value=NOTHING, verbosity=None): + """Set the metadata for the variable + + Args: + output (bool, optional): flag indicating whether the variable should be in the output of the module. Defaults to None. + diff (bool, optional): flag indicating whether to generate diff mode output for this variable. Defaults to None. + change (bool, optional): flag indicating whether to track if changes happened to this variable. Defaults to None. + fact (bool, optional): flag indicating whether the variable should be exposed as a fact of the module. Defaults to None. + initial_value (any, optional): initial value of the variable, to be used with `change`. Defaults to NOTHING. + verbosity (int, optional): level of verbosity in which this variable is reported by the module as `output`, `fact` or `diff`. Defaults to None. 
+ """ + if output is not None: + self.output = output + if change is not None: + self.change = change + if diff is not None: + self.diff = diff + if fact is not None: + self.fact = fact + if initial_value is not _Variable.NOTHING: + self.initial_value = copy.deepcopy(initial_value) + if verbosity is not None: + self.verbosity = verbosity + + def as_dict(self, meta_only=False): + d = { + "diff": self.diff, + "change": self.change, + "output": self.output, + "fact": self.fact, + "verbosity": self.verbosity, + } + if not meta_only: + d["initial_value"] = copy.deepcopy(self.initial_value) + d["value"] = self.value + return d + + def set_value(self, value): + if not self.init: + self.initial_value = copy.deepcopy(value) + self.init = True + self.value = value + return self + + def is_visible(self, verbosity): + return self.verbosity <= verbosity + + @property + def has_changed(self): + return self.change and (self.initial_value != self.value) + + @property + def diff_result(self): + if self.diff and self.has_changed: + return {'before': self.initial_value, 'after': self.value} + return + + def __str__(self): + return ( + f"" + ) + + +class VarDict(object): + reserved_names = ('__vars__', '_var', 'var', 'set_meta', 'get_meta', 'set', 'output', 'diff', 'facts', 'has_changed', 'as_dict') + + def __init__(self): + self.__vars__ = dict() + + def __getitem__(self, item): + return self.__vars__[item].value + + def __setitem__(self, key, value): + self.set(key, value) + + def __getattr__(self, item): + try: + return self.__vars__[item].value + except KeyError: + return getattr(super(VarDict, self), item) + + def __setattr__(self, key, value): + if key == '__vars__': + super(VarDict, self).__setattr__(key, value) + else: + self.set(key, value) + + def _var(self, name): + return self.__vars__[name] + + def var(self, name): + return self._var(name).as_dict() + + def set_meta(self, name, **kwargs): + """Set the metadata for the variable + + Args: + name (str): name of the variable 
having its metadata changed + output (bool, optional): flag indicating whether the variable should be in the output of the module. Defaults to None. + diff (bool, optional): flag indicating whether to generate diff mode output for this variable. Defaults to None. + change (bool, optional): flag indicating whether to track if changes happened to this variable. Defaults to None. + fact (bool, optional): flag indicating whether the variable should be exposed as a fact of the module. Defaults to None. + initial_value (any, optional): initial value of the variable, to be used with `change`. Defaults to NOTHING. + verbosity (int, optional): level of verbosity in which this variable is reported by the module as `output`, `fact` or `diff`. Defaults to None. + """ + self._var(name).set_meta(**kwargs) + + def get_meta(self, name): + return self._var(name).as_dict(meta_only=True) + + def set(self, name, value, **kwargs): + """Set the value and optionally metadata for a variable. The variable is not required to exist prior to calling `set`. + + For details on the accepted metada see the documentation for method `set_meta`. + + Args: + name (str): name of the variable being changed + value (any): the value of the variable, it can be of any type + + Raises: + ValueError: Raised if trying to set a variable with a reserved name. 
+ """ + if name in self.reserved_names: + raise ValueError(f"Name {name} is reserved") + if name in self.__vars__: + var = self._var(name) + var.set_meta(**kwargs) + else: + var = _Variable(**kwargs) + var.set_value(value) + self.__vars__[name] = var + + def output(self, verbosity=0): + return {n: v.value for n, v in self.__vars__.items() if v.output and v.is_visible(verbosity)} + + def diff(self, verbosity=0): + diff_results = [(n, v.diff_result) for n, v in self.__vars__.items() if v.diff_result and v.is_visible(verbosity)] + if diff_results: + before = {n: dr['before'] for n, dr in diff_results} + after = {n: dr['after'] for n, dr in diff_results} + return {'before': before, 'after': after} + return None + + def facts(self, verbosity=0): + facts_result = {n: v.value for n, v in self.__vars__.items() if v.fact and v.is_visible(verbosity)} + return facts_result if facts_result else None + + @property + def has_changed(self): + return any(var.has_changed for var in self.__vars__.values()) + + def as_dict(self): + return {name: var.value for name, var in self.__vars__.items()} diff --git a/plugins/module_utils/version.py b/plugins/module_utils/version.py index b671e59628..18cd6d12fe 100644 --- a/plugins/module_utils/version.py +++ b/plugins/module_utils/version.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2021, Felix Fontein # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -6,17 +5,7 @@ """Provide version object to compare version numbers.""" -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -from ansible.module_utils.six import raise_from - -try: - from ansible.module_utils.compat.version import LooseVersion -except ImportError: - try: - from distutils.version import LooseVersion - except ImportError as exc: - msg = 'To use this plugin or module with ansible-core 2.11, you need to use Python < 3.12 with 
distutils.version present' - raise_from(ImportError(msg), exc) +from ansible.module_utils.compat.version import LooseVersion # noqa: F401, pylint: disable=unused-import diff --git a/plugins/module_utils/vexata.py b/plugins/module_utils/vexata.py index 2ea56a3b05..ed0b11480c 100644 --- a/plugins/module_utils/vexata.py +++ b/plugins/module_utils/vexata.py @@ -1,11 +1,9 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2019, Sandeep Kasargod # Simplified BSD License (see LICENSES/BSD-2-Clause.txt or https://opensource.org/licenses/BSD-2-Clause) # SPDX-License-Identifier: BSD-2-Clause -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations HAS_VEXATAPI = True @@ -14,7 +12,6 @@ try: except ImportError: HAS_VEXATAPI = False -from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.basic import env_fallback VXOS_VERSION = None @@ -23,10 +20,9 @@ VXOS_VERSION = None def get_version(iocs_json): if not iocs_json: raise Exception('Invalid IOC json') - active = filter(lambda x: x['mgmtRole'], iocs_json) - if not active: + active = next((x for x in iocs_json if x['mgmtRole']), None) + if active is None: raise Exception('Unable to detect active IOC') - active = active[0] ver = active['swVersion'] if ver[0] != 'v': raise Exception('Illegal version string') @@ -60,7 +56,7 @@ def get_array(module): else: module.fail_json(msg='Test connection to array failed.') except Exception as e: - module.fail_json(msg='Vexata API access failed: {0}'.format(to_native(e))) + module.fail_json(msg=f'Vexata API access failed: {e}') def argument_spec(): diff --git a/plugins/module_utils/wdc_redfish_utils.py b/plugins/module_utils/wdc_redfish_utils.py index d27e02d7b7..564be3829e 100644 --- a/plugins/module_utils/wdc_redfish_utils.py +++ b/plugins/module_utils/wdc_redfish_utils.py @@ -1,22 +1,20 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2022 Western Digital Corporation # GNU General Public License 
v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import datetime import re import time import tarfile +import os +from urllib.parse import urlparse, urlunparse from ansible.module_utils.urls import fetch_file from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils -from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse - class WdcRedfishUtils(RedfishUtils): """Extension to RedfishUtils to support WDC enclosures.""" @@ -66,7 +64,7 @@ class WdcRedfishUtils(RedfishUtils): A URI is considered good if we can GET uri/redfish/v1. """ for root_uri in root_uris: - uri = root_uri + "/redfish/v1" + uri = f"{root_uri}/redfish/v1" response = self.get_request(uri) if response['ret']: self.root_uri = root_uri @@ -79,19 +77,25 @@ class WdcRedfishUtils(RedfishUtils): return response return self._find_updateservice_additional_uris() - def _is_enclosure_multi_tenant(self): + def _is_enclosure_multi_tenant_and_fetch_gen(self): """Determine if the enclosure is multi-tenant. The serial number of a multi-tenant enclosure will end in "-A" or "-B". + Fetching enclsoure generation. - :return: True/False if the enclosure is multi-tenant or not; None if unable to determine. + :return: True/False if the enclosure is multi-tenant or not and return enclosure generation; + None if unable to determine. 
""" - response = self.get_request(self.root_uri + self.service_root + "Chassis/Enclosure") + response = self.get_request(f"{self.root_uri}{self.service_root}Chassis/Enclosure") if response['ret'] is False: return None pattern = r".*-[A,B]" data = response['data'] - return re.match(pattern, data['SerialNumber']) is not None + if 'EnclVersion' not in data: + enc_version = 'G1' + else: + enc_version = data['EnclVersion'] + return re.match(pattern, data['SerialNumber']) is not None, enc_version def _find_updateservice_additional_uris(self): """Find & set WDC-specific update service URIs""" @@ -110,7 +114,7 @@ class WdcRedfishUtils(RedfishUtils): # Simple update status URI is not provided via GET /redfish/v1/UpdateService # So we have to hard code it. - self.simple_update_status_uri = "{0}/Status".format(self.simple_update_uri) + self.simple_update_status_uri = f"{self.simple_update_uri}/Status" # FWActivate URI if 'Oem' not in data['Actions']: @@ -180,15 +184,44 @@ class WdcRedfishUtils(RedfishUtils): To determine if the bundle is multi-tenant or not, it looks inside the .bin file within the tarfile, and checks the appropriate byte in the file. + If not tarfile, the bundle is checked for 2048th byte to determine whether it is Gen2 bundle. + Gen2 is always single tenant at this time. + :param str bundle_uri: HTTP URI of the firmware bundle. - :return: Firmware version number contained in the bundle, and whether or not the bundle is multi-tenant. - Either value will be None if unable to deterine. + :return: Firmware version number contained in the bundle, whether or not the bundle is multi-tenant + and bundle generation. Either value will be None if unable to determine. 
:rtype: str or None, bool or None """ bundle_temp_filename = fetch_file(module=self.module, url=bundle_uri) + bundle_version = None + is_multi_tenant = None + gen = None + + # If not tarfile, then if the file has "MMG2" or "DPG2" at 2048th byte + # then the bundle is for MM or DP G2 if not tarfile.is_tarfile(bundle_temp_filename): - return None, None + cookie1 = None + with open(bundle_temp_filename, "rb") as bundle_file: + file_size = os.path.getsize(bundle_temp_filename) + if file_size >= 2052: + bundle_file.seek(2048) + cookie1 = bundle_file.read(4) + # It is anticipated that DP firmware bundle will be having the value "DPG2" + # for cookie1 in the header + if cookie1 and cookie1.decode("utf8") == "MMG2" or cookie1.decode("utf8") == "DPG2": + file_name, ext = os.path.splitext(str(bundle_uri.rsplit('/', 1)[1])) + # G2 bundle file name: Ultrastar-Data102_3000_SEP_1010-032_2.1.12 + parsedFileName = file_name.split('_') + if len(parsedFileName) == 5: + bundle_version = parsedFileName[4] + # MM G2 is always single tanant + is_multi_tenant = False + gen = "G2" + + return bundle_version, is_multi_tenant, gen + + # Bundle is for MM or DP G1 tf = tarfile.open(bundle_temp_filename) pattern_pkg = r"oobm-(.+)\.pkg" pattern_bin = r"(.*\.bin)" @@ -205,8 +238,9 @@ class WdcRedfishUtils(RedfishUtils): bin_file.seek(11) byte_11 = bin_file.read(1) is_multi_tenant = byte_11 == b'\x80' + gen = "G1" - return bundle_version, is_multi_tenant + return bundle_version, is_multi_tenant, gen @staticmethod def uri_is_http(uri): @@ -233,9 +267,7 @@ class WdcRedfishUtils(RedfishUtils): parsed_url = urlparse(update_opts["update_image_uri"]) if update_creds: original_netloc = parsed_url.netloc - parsed_url = parsed_url._replace(netloc="{0}:{1}@{2}".format(update_creds.get("username"), - update_creds.get("password"), - original_netloc)) + parsed_url = parsed_url._replace(netloc=f"{update_creds.get('username')}:{update_creds.get('password')}@{original_netloc}") update_opts["update_image_uri"] = 
urlunparse(parsed_url) del update_opts["update_creds"] @@ -260,29 +292,32 @@ class WdcRedfishUtils(RedfishUtils): ]: return { 'ret': False, - 'msg': 'Target is not ready for FW update. Current status: {0} ({1})'.format( - status_code, status_description - )} + 'msg': f'Target is not ready for FW update. Current status: {status_code} ({status_description})'} # Check the FW version in the bundle file, and compare it to what is already on the IOMs # Bundle version number - bundle_firmware_version, is_bundle_multi_tenant = self._get_bundle_version(bundle_uri) - if bundle_firmware_version is None or is_bundle_multi_tenant is None: + bundle_firmware_version, is_bundle_multi_tenant, bundle_gen = self._get_bundle_version(bundle_uri) + if bundle_firmware_version is None or is_bundle_multi_tenant is None or bundle_gen is None: return { 'ret': False, - 'msg': 'Unable to extract bundle version or multi-tenant status from update image tarfile' + 'msg': 'Unable to extract bundle version or multi-tenant status or generation from update image file' } + is_enclosure_multi_tenant, enclosure_gen = self._is_enclosure_multi_tenant_and_fetch_gen() + # Verify that the bundle is correctly multi-tenant or not - is_enclosure_multi_tenant = self._is_enclosure_multi_tenant() if is_enclosure_multi_tenant != is_bundle_multi_tenant: return { 'ret': False, - 'msg': 'Enclosure multi-tenant is {0} but bundle multi-tenant is {1}'.format( - is_enclosure_multi_tenant, - is_bundle_multi_tenant, - ) + 'msg': f'Enclosure multi-tenant is {is_enclosure_multi_tenant} but bundle multi-tenant is {is_bundle_multi_tenant}' + } + + # Verify that the bundle is compliant with the target enclosure + if enclosure_gen != bundle_gen: + return { + 'ret': False, + 'msg': f'Enclosure generation is {enclosure_gen} but bundle is of {bundle_gen}' } # Version number installed on IOMs @@ -310,7 +345,7 @@ class WdcRedfishUtils(RedfishUtils): return { 'ret': True, 'changed': False, - 'msg': 'Version {0} already 
installed'.format(bundle_firmware_version) + 'msg': f'Version {bundle_firmware_version} already installed' } # Version numbers don't match the bundle -- proceed with update (unless we are in check mode) @@ -380,9 +415,7 @@ class WdcRedfishUtils(RedfishUtils): if status_code != self.UPDATE_STATUS_CODE_FW_UPDATE_COMPLETED_WAITING_FOR_ACTIVATION: return { 'ret': False, - 'msg': 'Target is not ready for FW activation after update. Current status: {0} ({1})'.format( - status_code, status_description - )} + 'msg': f'Target is not ready for FW activation after update. Current status: {status_code} ({status_description})'} self.firmware_activate(update_opts) return {'ret': True, 'changed': True, @@ -402,7 +435,7 @@ class WdcRedfishUtils(RedfishUtils): # The other will return an error with message "IOM Module A/B cannot be read" which_iom_is_this = None for iom_letter in ['A', 'B']: - iom_uri = "Chassis/IOModule{0}FRU".format(iom_letter) + iom_uri = f"Chassis/IOModule{iom_letter}FRU" response = self.get_request(self.root_uri + self.service_root + iom_uri) if response['ret'] is False: continue @@ -460,7 +493,7 @@ class WdcRedfishUtils(RedfishUtils): result['ret'] = True data = response['data'] if key not in data: - return {'ret': False, 'msg': "Key %s not found" % key} + return {'ret': False, 'msg': f"Key {key} not found"} current_led_status = data[key] if current_led_status == current_led_status_map[command]: return {'ret': True, 'changed': False} diff --git a/plugins/module_utils/xdg_mime.py b/plugins/module_utils/xdg_mime.py new file mode 100644 index 0000000000..d02002737b --- /dev/null +++ b/plugins/module_utils/xdg_mime.py @@ -0,0 +1,34 @@ +# Copyright (c) 2025, Marcos Alano +# Based on gio_mime module. 
Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt + + +def xdg_mime_runner(module, **kwargs): + return CmdRunner( + module, + command=['xdg-mime'], + arg_formats=dict( + default=cmd_runner_fmt.as_fixed('default'), + query=cmd_runner_fmt.as_fixed('query'), + mime_types=cmd_runner_fmt.as_list(), + handler=cmd_runner_fmt.as_list(), + version=cmd_runner_fmt.as_fixed('--version'), + ), + **kwargs + ) + + +def xdg_mime_get(runner, mime_type): + def process(rc, out, err): + if not out.strip(): + return None + out = out.splitlines()[0] + return out.split()[-1] + + with runner("query default mime_types", output_process=process) as ctx: + return ctx.run(mime_types=mime_type) diff --git a/plugins/module_utils/xenserver.py b/plugins/module_utils/xenserver.py index 3176b56289..32576000cc 100644 --- a/plugins/module_utils/xenserver.py +++ b/plugins/module_utils/xenserver.py @@ -1,11 +1,9 @@ -# -*- coding: utf-8 -*- # # Copyright (c) 2018, Bojan Vitnik # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations import atexit import time @@ -28,22 +26,18 @@ def xenserver_common_argument_spec(): return dict( hostname=dict(type='str', aliases=['host', 'pool'], - required=False, default='localhost', fallback=(env_fallback, ['XENSERVER_HOST']), ), username=dict(type='str', aliases=['user', 'admin'], - required=False, default='root', fallback=(env_fallback, ['XENSERVER_USER'])), password=dict(type='str', aliases=['pass', 'pwd'], - required=False, no_log=True, 
fallback=(env_fallback, ['XENSERVER_PASSWORD'])), validate_certs=dict(type='bool', - required=False, default=True, fallback=(env_fallback, ['XENSERVER_VALIDATE_CERTS'])), ) @@ -294,29 +288,29 @@ def get_object_ref(module, name, uuid=None, obj_type="VM", fail=True, msg_prefix try: # Find object by UUID. If no object is found using given UUID, # an exception will be generated. - obj_ref = xapi_session.xenapi_request("%s.get_by_uuid" % real_obj_type, (uuid,)) + obj_ref = xapi_session.xenapi_request(f"{real_obj_type}.get_by_uuid", (uuid,)) except XenAPI.Failure as f: if fail: - module.fail_json(msg="%s%s with UUID '%s' not found!" % (msg_prefix, obj_type, uuid)) + module.fail_json(msg=f"{msg_prefix}{obj_type} with UUID '{uuid}' not found!") elif name: try: # Find object by name (name_label). - obj_ref_list = xapi_session.xenapi_request("%s.get_by_name_label" % real_obj_type, (name,)) + obj_ref_list = xapi_session.xenapi_request(f"{real_obj_type}.get_by_name_label", (name,)) except XenAPI.Failure as f: - module.fail_json(msg="XAPI ERROR: %s" % f.details) + module.fail_json(msg=f"XAPI ERROR: {f.details}") # If obj_ref_list is empty. if not obj_ref_list: if fail: - module.fail_json(msg="%s%s with name '%s' not found!" % (msg_prefix, obj_type, name)) + module.fail_json(msg=f"{msg_prefix}{obj_type} with name '{name}' not found!") # If obj_ref_list contains multiple object references. elif len(obj_ref_list) > 1: - module.fail_json(msg="%smultiple %ss with name '%s' found! Please use UUID." % (msg_prefix, obj_type, name)) + module.fail_json(msg=f"{msg_prefix}multiple {obj_type}s with name '{name}' found! Please use UUID.") # The obj_ref_list contains only one object reference. else: obj_ref = obj_ref_list[0] else: - module.fail_json(msg="%sno valid name or UUID supplied for %s!" 
% (msg_prefix, obj_type)) + module.fail_json(msg=f"{msg_prefix}no valid name or UUID supplied for {obj_type}!") return obj_ref @@ -402,7 +396,7 @@ def gather_vm_params(module, vm_ref): vm_params['customization_agent'] = "custom" except XenAPI.Failure as f: - module.fail_json(msg="XAPI ERROR: %s" % f.details) + module.fail_json(msg=f"XAPI ERROR: {f.details}") return vm_params @@ -478,12 +472,13 @@ def gather_vm_facts(module, vm_params): "mac": vm_vif_params['MAC'], "vif_device": vm_vif_params['device'], "mtu": vm_vif_params['MTU'], - "ip": vm_guest_metrics_networks.get("%s/ip" % vm_vif_params['device'], ''), + "ip": vm_guest_metrics_networks.get(f"{vm_vif_params['device']}/ip", ''), "prefix": "", "netmask": "", "gateway": "", - "ip6": [vm_guest_metrics_networks[ipv6] for ipv6 in sorted(vm_guest_metrics_networks.keys()) if ipv6.startswith("%s/ipv6/" % - vm_vif_params['device'])], + "ip6": [vm_guest_metrics_networks[ipv6] + for ipv6 in sorted(vm_guest_metrics_networks.keys()) + if ipv6.startswith(f"{vm_vif_params['device']}/ipv6/")], "prefix6": "", "gateway6": "", } @@ -504,7 +499,7 @@ def gather_vm_facts(module, vm_params): vm_xenstore_data = vm_params['xenstore_data'] for f in ['prefix', 'netmask', 'gateway', 'prefix6', 'gateway6']: - vm_network_params[f] = vm_xenstore_data.get("vm-data/networks/%s/%s" % (vm_vif_params['device'], f), "") + vm_network_params[f] = vm_xenstore_data.get(f"vm-data/networks/{vm_vif_params['device']}/{f}", "") vm_facts['networks'].append(vm_network_params) @@ -571,14 +566,14 @@ def set_vm_power_state(module, vm_ref, power_state, timeout=300): if not module.check_mode: xapi_session.xenapi.VM.hard_reboot(vm_ref) else: - module.fail_json(msg="Cannot restart VM in state '%s'!" % vm_power_state_current) + module.fail_json(msg=f"Cannot restart VM in state '{vm_power_state_current}'!") elif power_state == "suspended": # running state is required for suspend. 
if vm_power_state_current == "poweredon": if not module.check_mode: xapi_session.xenapi.VM.suspend(vm_ref) else: - module.fail_json(msg="Cannot suspend VM in state '%s'!" % vm_power_state_current) + module.fail_json(msg=f"Cannot suspend VM in state '{vm_power_state_current}'!") elif power_state == "shutdownguest": # running state is required for guest shutdown. if vm_power_state_current == "poweredon": @@ -590,9 +585,9 @@ def set_vm_power_state(module, vm_ref, power_state, timeout=300): task_result = wait_for_task(module, task_ref, timeout) if task_result: - module.fail_json(msg="Guest shutdown task failed: '%s'!" % task_result) + module.fail_json(msg=f"Guest shutdown task failed: '{task_result}'!") else: - module.fail_json(msg="Cannot shutdown guest when VM is in state '%s'!" % vm_power_state_current) + module.fail_json(msg=f"Cannot shutdown guest when VM is in state '{vm_power_state_current}'!") elif power_state == "rebootguest": # running state is required for guest reboot. if vm_power_state_current == "poweredon": @@ -604,15 +599,15 @@ def set_vm_power_state(module, vm_ref, power_state, timeout=300): task_result = wait_for_task(module, task_ref, timeout) if task_result: - module.fail_json(msg="Guest reboot task failed: '%s'!" % task_result) + module.fail_json(msg=f"Guest reboot task failed: '{task_result}'!") else: - module.fail_json(msg="Cannot reboot guest when VM is in state '%s'!" % vm_power_state_current) + module.fail_json(msg=f"Cannot reboot guest when VM is in state '{vm_power_state_current}'!") else: - module.fail_json(msg="Requested VM power state '%s' is unsupported!" 
% power_state) + module.fail_json(msg=f"Requested VM power state '{power_state}' is unsupported!") state_changed = True except XenAPI.Failure as f: - module.fail_json(msg="XAPI ERROR: %s" % f.details) + module.fail_json(msg=f"XAPI ERROR: {f.details}") return (state_changed, vm_power_state_resulting) @@ -671,7 +666,7 @@ def wait_for_task(module, task_ref, timeout=300): xapi_session.xenapi.task.destroy(task_ref) except XenAPI.Failure as f: - module.fail_json(msg="XAPI ERROR: %s" % f.details) + module.fail_json(msg=f"XAPI ERROR: {f.details}") return result @@ -703,7 +698,7 @@ def wait_for_vm_ip_address(module, vm_ref, timeout=300): vm_power_state = xapi_to_module_vm_power_state(xapi_session.xenapi.VM.get_power_state(vm_ref).lower()) if vm_power_state != 'poweredon': - module.fail_json(msg="Cannot wait for VM IP address when VM is in state '%s'!" % vm_power_state) + module.fail_json(msg=f"Cannot wait for VM IP address when VM is in state '{vm_power_state}'!") interval = 2 @@ -734,7 +729,7 @@ def wait_for_vm_ip_address(module, vm_ref, timeout=300): module.fail_json(msg="Timed out waiting for VM IP address!") except XenAPI.Failure as f: - module.fail_json(msg="XAPI ERROR: %s" % f.details) + module.fail_json(msg=f"XAPI ERROR: {f.details}") return vm_guest_metrics @@ -797,7 +792,7 @@ class XAPI(object): # If scheme is not specified we default to http:// because https:// # is problematic in most setups. 
if not hostname.startswith("http://") and not hostname.startswith("https://"): - hostname = "http://%s" % hostname + hostname = f"http://{hostname}" try: # ignore_ssl is supported in XenAPI library from XenServer 7.2 @@ -816,7 +811,7 @@ class XAPI(object): try: cls._xapi_session.login_with_password(username, password, ANSIBLE_VERSION, 'Ansible') except XenAPI.Failure as f: - module.fail_json(msg="Unable to log on to XenServer at %s as %s: %s" % (hostname, username, f.details)) + module.fail_json(msg=f"Unable to log on to XenServer at {hostname} as {username}: {f.details}") # Disabling atexit should be used in special cases only. if disconnect_atexit: @@ -859,4 +854,4 @@ class XenServerObject(object): self.default_sr_ref = self.xapi_session.xenapi.pool.get_default_SR(self.pool_ref) self.xenserver_version = get_xenserver_version(module) except XenAPI.Failure as f: - self.module.fail_json(msg="XAPI ERROR: %s" % f.details) + self.module.fail_json(msg=f"XAPI ERROR: {f.details}") diff --git a/plugins/module_utils/xfconf.py b/plugins/module_utils/xfconf.py index b63518d0c4..8febbf450d 100644 --- a/plugins/module_utils/xfconf.py +++ b/plugins/module_utils/xfconf.py @@ -1,22 +1,20 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2022, Alexei Znamensky # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations from ansible.module_utils.parsing.convert_bool import boolean -from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt as fmt +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt -@fmt.unpack_args +@cmd_runner_fmt.unpack_args def _values_fmt(values, value_types): result = [] for value, value_type in zip(values, value_types): if value_type == 
'bool': value = 'true' if boolean(value) else 'false' - result.extend(['--type', '{0}'.format(value_type), '--set', '{0}'.format(value)]) + result.extend(['--type', f'{value_type}', '--set', f'{value}']) return result @@ -25,14 +23,21 @@ def xfconf_runner(module, **kwargs): module, command='xfconf-query', arg_formats=dict( - channel=fmt.as_opt_val("--channel"), - property=fmt.as_opt_val("--property"), - force_array=fmt.as_bool("--force-array"), - reset=fmt.as_bool("--reset"), - create=fmt.as_bool("--create"), - list_arg=fmt.as_bool("--list"), - values_and_types=fmt.as_func(_values_fmt), + channel=cmd_runner_fmt.as_opt_val("--channel"), + property=cmd_runner_fmt.as_opt_val("--property"), + force_array=cmd_runner_fmt.as_bool("--force-array"), + reset=cmd_runner_fmt.as_bool("--reset"), + create=cmd_runner_fmt.as_bool("--create"), + list_arg=cmd_runner_fmt.as_bool("--list"), + values_and_types=_values_fmt, + version=cmd_runner_fmt.as_fixed("--version"), ), **kwargs ) return runner + + +def get_xfconf_version(runner): + with runner("version") as ctx: + rc, out, err = ctx.run() + return out.splitlines()[0].split()[1] diff --git a/plugins/modules/aerospike_migrations.py b/plugins/modules/aerospike_migrations.py index 32ab06a853..02b8bd7730 100644 --- a/plugins/modules/aerospike_migrations.py +++ b/plugins/modules/aerospike_migrations.py @@ -1,111 +1,106 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- """short_description: Check or wait for migrations between nodes""" # Copyright (c) 2018, Albert Autin # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: aerospike_migrations short_description: Check or wait for migrations between nodes description: - - This can be used to check for migrations 
in a cluster. - This makes it easy to do a rolling upgrade/update on Aerospike nodes. - - If waiting for migrations is not desired, simply just poll until - port 3000 if available or asinfo -v status returns ok + - This can be used to check for migrations in a cluster. This makes it easy to do a rolling upgrade/update on Aerospike + nodes. + - If waiting for migrations is not desired, simply just poll until port 3000 if available or C(asinfo -v status) returns + ok. author: "Albert Autin (@Alb0t)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - host: - description: - - Which host do we use as seed for info connection - required: false - type: str - default: localhost - port: - description: - - Which port to connect to Aerospike on (service port) - required: false - type: int - default: 3000 - connect_timeout: - description: - - How long to try to connect before giving up (milliseconds) - required: false - type: int - default: 1000 - consecutive_good_checks: - description: - - How many times should the cluster report "no migrations" - consecutively before returning OK back to ansible? - required: false - type: int - default: 3 - sleep_between_checks: - description: - - How long to sleep between each check (seconds). - required: false - type: int - default: 60 - tries_limit: - description: - - How many times do we poll before giving up and failing? - default: 300 - required: false - type: int - local_only: - description: - - Do you wish to only check for migrations on the local node - before returning, or do you want all nodes in the cluster - to finish before returning? 
- required: true - type: bool - min_cluster_size: - description: - - Check will return bad until cluster size is met - or until tries is exhausted - required: false - type: int - default: 1 - fail_on_cluster_change: - description: - - Fail if the cluster key changes - if something else is changing the cluster, we may want to fail - required: false - type: bool - default: true - migrate_tx_key: - description: - - The metric key used to determine if we have tx migrations - remaining. Changeable due to backwards compatibility. - required: false - type: str - default: migrate_tx_partitions_remaining - migrate_rx_key: - description: - - The metric key used to determine if we have rx migrations - remaining. Changeable due to backwards compatibility. - required: false - type: str - default: migrate_rx_partitions_remaining - target_cluster_size: - description: - - When all aerospike builds in the cluster are greater than - version 4.3, then the C(cluster-stable) info command will be used. - Inside this command, you can optionally specify what the target - cluster size is - but it is not necessary. You can still rely on - min_cluster_size if you don't want to use this option. - - If this option is specified on a cluster that has at least 1 - host <4.3 then it will be ignored until the min version reaches - 4.3. - required: false - type: int -''' -EXAMPLES = ''' + host: + description: + - Which host do we use as seed for info connection. + type: str + default: localhost + port: + description: + - Which port to connect to Aerospike on (service port). + required: false + type: int + default: 3000 + connect_timeout: + description: + - How long to try to connect before giving up (milliseconds). + required: false + type: int + default: 1000 + consecutive_good_checks: + description: + - How many times should the cluster report "no migrations" consecutively before returning OK back to ansible? 
+ required: false + type: int + default: 3 + sleep_between_checks: + description: + - How long to sleep between each check (seconds). + required: false + type: int + default: 60 + tries_limit: + description: + - How many times do we poll before giving up and failing? + default: 300 + required: false + type: int + local_only: + description: + - Do you wish to only check for migrations on the local node before returning, or do you want all nodes in the cluster + to finish before returning? + required: true + type: bool + min_cluster_size: + description: + - Check fails until cluster size is met or until tries is exhausted. + required: false + type: int + default: 1 + fail_on_cluster_change: + description: + - Fail if the cluster key changes if something else is changing the cluster, we may want to fail. + required: false + type: bool + default: true + migrate_tx_key: + description: + - The metric key used to determine if we have tx migrations remaining. Changeable due to backwards compatibility. + required: false + type: str + default: migrate_tx_partitions_remaining + migrate_rx_key: + description: + - The metric key used to determine if we have rx migrations remaining. Changeable due to backwards compatibility. + required: false + type: str + default: migrate_rx_partitions_remaining + target_cluster_size: + description: + - When all aerospike builds in the cluster are greater than version 4.3, then the C(cluster-stable) info command is + used. Inside this command, you can optionally specify what the target cluster size is - but it is not necessary. + You can still rely on O(min_cluster_size) if you do not want to use this option. + - If this option is specified on a cluster that has at least one host <4.3 then it is ignored until the min version + reaches 4.3. 
+ required: false + type: int +""" + +EXAMPLES = r""" # check for migrations on local node - name: Wait for migrations on local node before proceeding community.general.aerospike_migrations: @@ -125,13 +120,13 @@ EXAMPLES = ''' - name: Install dependencies ansible.builtin.apt: name: - - python - - python-pip - - python-setuptools + - python + - python-pip + - python-setuptools state: latest - name: Setup aerospike ansible.builtin.pip: - name: aerospike + name: aerospike # check for migrations every (sleep_between_checks) # If at least (consecutive_good_checks) checks come back OK in a row, then return OK. # Will exit if any exception, which can be caused by bad nodes, @@ -140,13 +135,13 @@ EXAMPLES = ''' # Tries Limit * Sleep Between Checks * delay * retries - name: Wait for aerospike migrations community.general.aerospike_migrations: - local_only: true - sleep_between_checks: 1 - tries_limit: 5 - consecutive_good_checks: 3 - fail_on_cluster_change: true - min_cluster_size: 3 - target_cluster_size: 4 + local_only: true + sleep_between_checks: 1 + tries_limit: 5 + consecutive_good_checks: 3 + fail_on_cluster_change: true + min_cluster_size: 3 + target_cluster_size: 4 register: migrations_check until: migrations_check is succeeded changed_when: false @@ -154,14 +149,14 @@ EXAMPLES = ''' retries: 120 - name: Another thing ansible.builtin.shell: | - echo foo + echo foo - name: Reboot ansible.builtin.reboot: -''' +""" -RETURN = ''' +RETURN = r""" # Returns only a success/failure result. Changed is always false. 
-''' +""" import traceback @@ -182,19 +177,19 @@ else: def run_module(): """run ansible module""" module_args = dict( - host=dict(type='str', required=False, default='localhost'), - port=dict(type='int', required=False, default=3000), - connect_timeout=dict(type='int', required=False, default=1000), - consecutive_good_checks=dict(type='int', required=False, default=3), - sleep_between_checks=dict(type='int', required=False, default=60), - tries_limit=dict(type='int', required=False, default=300), + host=dict(type='str', default='localhost'), + port=dict(type='int', default=3000), + connect_timeout=dict(type='int', default=1000), + consecutive_good_checks=dict(type='int', default=3), + sleep_between_checks=dict(type='int', default=60), + tries_limit=dict(type='int', default=300), local_only=dict(type='bool', required=True), - min_cluster_size=dict(type='int', required=False, default=1), - target_cluster_size=dict(type='int', required=False, default=None), - fail_on_cluster_change=dict(type='bool', required=False, default=True), - migrate_tx_key=dict(type='str', required=False, no_log=False, + min_cluster_size=dict(type='int', default=1), + target_cluster_size=dict(type='int'), + fail_on_cluster_change=dict(type='bool', default=True), + migrate_tx_key=dict(type='str', no_log=False, default="migrate_tx_partitions_remaining"), - migrate_rx_key=dict(type='str', required=False, no_log=False, + migrate_rx_key=dict(type='str', no_log=False, default="migrate_rx_partitions_remaining") ) diff --git a/plugins/modules/airbrake_deployment.py b/plugins/modules/airbrake_deployment.py index 2fb5b58737..d55c04fa52 100644 --- a/plugins/modules/airbrake_deployment.py +++ b/plugins/modules/airbrake_deployment.py @@ -1,27 +1,31 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright 2013 Bruce Pennypacker # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import 
absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: airbrake_deployment author: -- "Bruce Pennypacker (@bpennypacker)" -- "Patrick Humpal (@phumpal)" + - "Bruce Pennypacker (@bpennypacker)" + - "Patrick Humpal (@phumpal)" short_description: Notify airbrake about app deployments description: - - Notify airbrake about app deployments (see U(https://airbrake.io/docs/api/#deploys-v4)). + - Notify airbrake about app deployments (see U(https://airbrake.io/docs/api/#deploys-v4)). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: project_id: description: - - Airbrake PROJECT_ID + - Airbrake PROJECT_ID. required: true type: str version_added: '0.2.0' @@ -33,27 +37,27 @@ options: version_added: '0.2.0' environment: description: - - The airbrake environment name, typically 'production', 'staging', etc. + - The airbrake environment name, typically V(production), V(staging), and so on. required: true type: str user: description: - - The username of the person doing the deployment + - The username of the person doing the deployment. required: false type: str repo: description: - - URL of the project repository + - URL of the project repository. required: false type: str revision: description: - - A hash, number, tag, or other identifier showing what revision from version control was deployed + - A hash, number, tag, or other identifier showing what revision from version control was deployed. required: false type: str version: description: - - A string identifying what version was deployed + - A string identifying what version was deployed. required: false type: str version_added: '1.0.0' @@ -65,16 +69,16 @@ options: type: str validate_certs: description: - - If C(false), SSL certificates for the target url will not be validated. 
This should only be used - on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates for the target URL is not validated. This should only be used on personally controlled + sites using self-signed certificates. required: false default: true type: bool requirements: [] -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Notify airbrake about an app deployment community.general.airbrake_deployment: project_id: '12345' @@ -91,7 +95,7 @@ EXAMPLES = ''' user: ansible revision: 'e54dd3a01f2c421b558ef33b5f79db936e2dcf15' version: '0.2.0' -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url @@ -108,11 +112,11 @@ def main(): project_id=dict(required=True, no_log=True, type='str'), project_key=dict(required=True, no_log=True, type='str'), environment=dict(required=True, type='str'), - user=dict(required=False, type='str'), - repo=dict(required=False, type='str'), - revision=dict(required=False, type='str'), - version=dict(required=False, type='str'), - url=dict(required=False, default='https://api.airbrake.io/api/v4/projects/', type='str'), + user=dict(type='str'), + repo=dict(type='str'), + revision=dict(type='str'), + version=dict(type='str'), + url=dict(default='https://api.airbrake.io/api/v4/projects/', type='str'), validate_certs=dict(default=True, type='bool'), ), supports_check_mode=True, diff --git a/plugins/modules/aix_devices.py b/plugins/modules/aix_devices.py index be23937baa..a525f6fe05 100644 --- a/plugins/modules/aix_devices.py +++ b/plugins/modules/aix_devices.py @@ -1,53 +1,57 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017, 2018 Kairo Araujo # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' 
---- +DOCUMENTATION = r""" author: -- Kairo Araujo (@kairoaraujo) + - Kairo Araujo (@kairoaraujo) module: aix_devices short_description: Manages AIX devices description: -- This module discovers, defines, removes and modifies attributes of AIX devices. + - This module discovers, defines, removes and modifies attributes of AIX devices. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: attributes: description: - - A list of device attributes. + - A list of device attributes. type: dict device: description: - - The name of the device. - - C(all) is valid to rescan C(available) all devices (AIX cfgmgr command). + - The name of the device. + - V(all) is valid to rescan C(available) all devices (AIX C(cfgmgr) command). type: str force: description: - - Forces action. + - Forces action. type: bool default: false recursive: description: - - Removes or defines a device and children devices. + - Removes or defines a device and children devices. type: bool default: false state: description: - - Controls the device state. - - C(available) (alias C(present)) rescan a specific device or all devices (when C(device) is not specified). - - C(removed) (alias C(absent) removes a device. - - C(defined) changes device to Defined state. + - Controls the device state. + - V(available) (alias V(present)) rescan a specific device or all devices (when O(device) is not specified). + - V(removed) (alias V(absent)) removes a device. + - V(defined) changes device to Defined state. type: str - choices: [ available, defined, removed ] + choices: [available, defined, removed] default: available -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Scan new devices community.general.aix_devices: device: all @@ -101,7 +105,7 @@ EXAMPLES = r''' device: en1 attributes: mtu: 900 - arp: off + arp: 'off' state: available - name: Configure IP, netmask and set en1 up. 
@@ -119,9 +123,9 @@ EXAMPLES = r''' attributes: alias4: 10.0.0.100,255.255.255.0 state: available -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/aix_filesystem.py b/plugins/modules/aix_filesystem.py index 77d065a59a..58d49c0252 100644 --- a/plugins/modules/aix_filesystem.py +++ b/plugins/modules/aix_filesystem.py @@ -1,24 +1,27 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017, Kairo Araujo # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" author: - Kairo Araujo (@kairoaraujo) module: aix_filesystem short_description: Configure LVM and NFS file systems for AIX description: - - This module creates, removes, mount and unmount LVM and NFS file system for - AIX using C(/etc/filesystems). + - This module creates, removes, mount and unmount LVM and NFS file system for AIX using C(/etc/filesystems). - For LVM file systems is possible to resize a file system. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: account_subsystem: description: @@ -31,8 +34,8 @@ options: type: list elements: str default: - - agblksize='4096' - - isnapshot='no' + - agblksize=4096 + - isnapshot=no auto_mount: description: - File system is automatically mounted at system restart. @@ -42,7 +45,7 @@ options: description: - Logical volume (LV) device name or remote export device to create a NFS file system. - It is used to create a file system on an already existing logical volume or the exported NFS file system. - - If not mentioned a new logical volume name will be created following AIX standards (LVM). 
+ - If not mentioned a new logical volume name is created following AIX standards (LVM). type: str fs_type: description: @@ -51,9 +54,9 @@ options: default: jfs2 permissions: description: - - Set file system permissions. C(rw) (read-write) or C(ro) (read-only). + - Set file system permissions. V(rw) (read-write) or V(ro) (read-only). type: str - choices: [ ro, rw ] + choices: [ro, rw] default: rw mount_group: description: @@ -70,41 +73,40 @@ options: type: str rm_mount_point: description: - - Removes the mount point directory when used with state C(absent). + - Removes the mount point directory when used with state V(absent). type: bool default: false size: description: - Specifies the file system size. - - For already C(present) it will be resized. - - 512-byte blocks, Megabytes or Gigabytes. If the value has M specified - it will be in Megabytes. If the value has G specified it will be in - Gigabytes. - - If no M or G the value will be 512-byte blocks. - - If "+" is specified in begin of value, the value will be added. - - If "-" is specified in begin of value, the value will be removed. - - If "+" or "-" is not specified, the total value will be the specified. - - Size will respects the LVM AIX standards. + - For already present it resizes the filesystem. + - 512-byte blocks, megabytes or gigabytes. If the value has M specified it is in megabytes. If the value has G specified + it is in gigabytes. + - If no M or G the value is 512-byte blocks. + - If V(+) is specified in begin of value, the value is added. + - If V(-) is specified in begin of value, the value is removed. + - If neither V(+) nor V(-) is specified, then the total value is the specified. + - Size respects the LVM AIX standards. type: str state: description: - Controls the file system state. - - C(present) check if file system exists, creates or resize. - - C(absent) removes existing file system if already C(unmounted). - - C(mounted) checks if the file system is mounted or mount the file system. 
- - C(unmounted) check if the file system is unmounted or unmount the file system. + - V(present) check if file system exists, creates or resize. + - V(absent) removes existing file system if already V(unmounted). + - V(mounted) checks if the file system is mounted or mount the file system. + - V(unmounted) check if the file system is unmounted or unmount the file system. type: str - choices: [ absent, mounted, present, unmounted ] + choices: [absent, mounted, present, unmounted] default: present vg: description: - Specifies an existing volume group (VG). type: str notes: - - For more C(attributes), please check "crfs" AIX manual. -''' + - For more O(attributes), please check "crfs" AIX manual. +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create filesystem in a previously defined logical volume. community.general.aix_filesystem: device: testlv @@ -159,18 +161,8 @@ EXAMPLES = r''' filesystem: /newfs rm_mount_point: true state: absent -''' +""" -RETURN = r''' -changed: - description: Return changed for aix_filesystems actions as true or false. - returned: always - type: bool -msg: - description: Return message regarding the action. - returned: always - type: str -''' from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils._mount import ismount @@ -235,7 +227,7 @@ def _validate_vg(module, vg): if rc != 0: module.fail_json(msg="Failed executing %s command." % lsvg_cmd) - rc, current_all_vgs, err = module.run_command([lsvg_cmd, "%s"]) + rc, current_all_vgs, err = module.run_command([lsvg_cmd]) if rc != 0: module.fail_json(msg="Failed executing %s command." % lsvg_cmd) @@ -358,7 +350,53 @@ def create_fs( # Creates a LVM file system. 
crfs_cmd = module.get_bin_path('crfs', True) if not module.check_mode: - cmd = [crfs_cmd, "-v", fs_type, "-m", filesystem, vg, device, mount_group, auto_mount, account_subsystem, "-p", permissions, size, "-a", attributes] + cmd = [crfs_cmd] + + cmd.append("-v") + cmd.append(fs_type) + + if vg: + (flag, value) = vg.split() + cmd.append(flag) + cmd.append(value) + + if device: + (flag, value) = device.split() + cmd.append(flag) + cmd.append(value) + + cmd.append("-m") + cmd.append(filesystem) + + if mount_group: + (flag, value) = mount_group.split() + cmd.append(flag) + cmd.append(value) + + if auto_mount: + (flag, value) = auto_mount.split() + cmd.append(flag) + cmd.append(value) + + if account_subsystem: + (flag, value) = account_subsystem.split() + cmd.append(flag) + cmd.append(value) + + cmd.append("-p") + cmd.append(permissions) + + if size: + (flag, value) = size.split() + cmd.append(flag) + cmd.append(value) + + if attributes: + splitted_attributes = attributes.split() + cmd.append("-a") + for value in splitted_attributes: + cmd.append(value) + rc, crfs_out, err = module.run_command(cmd) if rc == 10: @@ -454,7 +492,7 @@ def main(): module = AnsibleModule( argument_spec=dict( account_subsystem=dict(type='bool', default=False), - attributes=dict(type='list', elements='str', default=["agblksize='4096'", "isnapshot='no'"]), + attributes=dict(type='list', elements='str', default=["agblksize=4096", "isnapshot=no"]), auto_mount=dict(type='bool', default=True), device=dict(type='str'), filesystem=dict(type='str', required=True), diff --git a/plugins/modules/aix_inittab.py b/plugins/modules/aix_inittab.py index 57ef4758cb..407992ceba 100644 --- a/plugins/modules/aix_inittab.py +++ b/plugins/modules/aix_inittab.py @@ -1,74 +1,78 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017, Joris Weijters # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later 
-from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" author: -- Joris Weijters (@molekuul) + - Joris Weijters (@molekuul) module: aix_inittab -short_description: Manages the inittab on AIX +short_description: Manages the C(inittab) on AIX description: - - Manages the inittab on AIX. + - Manages the C(inittab) on AIX. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: description: - - Name of the inittab entry. + - Name of the C(inittab) entry. type: str required: true - aliases: [ service ] + aliases: [service] runlevel: description: - - Runlevel of the entry. + - Runlevel of the entry. type: str required: true action: description: - - Action what the init has to do with this entry. + - Action what the init has to do with this entry. type: str choices: - - boot - - bootwait - - hold - - initdefault - - 'off' - - once - - ondemand - - powerfail - - powerwait - - respawn - - sysinit - - wait + - boot + - bootwait + - hold + - initdefault + - 'off' + - once + - ondemand + - powerfail + - powerwait + - respawn + - sysinit + - wait command: description: - - What command has to run. + - What command has to run. type: str required: true insertafter: description: - - After which inittabline should the new entry inserted. + - After which inittabline should the new entry inserted. type: str state: description: - - Whether the entry should be present or absent in the inittab file. + - Whether the entry should be present or absent in the inittab file. type: str - choices: [ absent, present ] + choices: [absent, present] default: present notes: - The changes are persistent across reboots. - You need root rights to read or adjust the inittab with the C(lsitab), C(chitab), C(mkitab) or C(rmitab) commands. - Tested on AIX 7.1. 
requirements: -- itertools -''' + - itertools +""" -EXAMPLES = ''' +EXAMPLES = r""" # Add service startmyservice to the inittab, directly after service existingservice. - name: Add startmyservice to inittab community.general.aix_inittab: @@ -98,32 +102,16 @@ EXAMPLES = ''' command: echo hello state: absent become: true -''' +""" -RETURN = ''' +RETURN = r""" name: - description: Name of the adjusted inittab entry - returned: always - type: str - sample: startmyservice -msg: - description: Action done with the inittab entry - returned: changed - type: str - sample: changed inittab entry startmyservice -changed: - description: Whether the inittab changed or not - returned: always - type: bool - sample: true -''' + description: Name of the adjusted C(inittab) entry. + returned: always + type: str + sample: startmyservice +""" -# Import necessary libraries -try: - # python 2 - from itertools import izip -except ImportError: - izip = zip from ansible.module_utils.basic import AnsibleModule @@ -142,7 +130,7 @@ def check_current_entry(module): values = out.split(":") # strip non readable characters as \n values = map(lambda s: s.strip(), values) - existsdict = dict(izip(keys, values)) + existsdict = dict(zip(keys, values)) existsdict.update({'exist': True}) return existsdict @@ -185,6 +173,7 @@ def main(): rmitab = module.get_bin_path('rmitab') chitab = module.get_bin_path('chitab') rc = 0 + err = None # check if the new entry exists current_entry = check_current_entry(module) @@ -197,7 +186,7 @@ def main(): ":" + module.params['action'] + ":" + module.params['command'] # If current entry exists or fields are different(if the entry does not - # exists, then the entry wil be created + # exists, then the entry will be created if (not current_entry['exist']) or ( module.params['runlevel'] != current_entry['runlevel'] or module.params['action'] != current_entry['action'] or diff --git a/plugins/modules/aix_lvg.py b/plugins/modules/aix_lvg.py index 44ad631236..7afc58e2f7 100644 
--- a/plugins/modules/aix_lvg.py +++ b/plugins/modules/aix_lvg.py @@ -1,61 +1,65 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017, Kairo Araujo # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" author: -- Kairo Araujo (@kairoaraujo) + - Kairo Araujo (@kairoaraujo) module: aix_lvg short_description: Manage LVM volume groups on AIX description: -- This module creates, removes or resize volume groups on AIX LVM. + - This module creates, removes or resize volume groups on AIX LVM. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: force: description: - - Force volume group creation. + - Force volume group creation. type: bool default: false pp_size: description: - - The size of the physical partition in megabytes. + - The size of the physical partition in megabytes. type: int pvs: description: - - List of comma-separated devices to use as physical devices in this volume group. - - Required when creating or extending (C(present) state) the volume group. - - If not informed reducing (C(absent) state) the volume group will be removed. + - List of comma-separated devices to use as physical devices in this volume group. + - Required when creating or extending (V(present) state) the volume group. + - If not informed reducing (V(absent) state) the volume group is removed. type: list elements: str state: description: - - Control if the volume group exists and volume group AIX state varyonvg C(varyon) or varyoffvg C(varyoff). + - Control if the volume group exists and volume group AIX state varyonvg V(varyon) or varyoffvg V(varyoff). 
type: str - choices: [ absent, present, varyoff, varyon ] + choices: [absent, present, varyoff, varyon] default: present vg: description: - - The name of the volume group. + - The name of the volume group. type: str required: true vg_type: description: - - The type of the volume group. + - The type of the volume group. type: str - choices: [ big, normal, scalable ] + choices: [big, normal, scalable] default: normal notes: -- AIX will permit remove VG only if all LV/Filesystems are not busy. -- Module does not modify PP size for already present volume group. -''' + - AIX allows removing VG only if all LV/Filesystems are not busy. + - Module does not modify PP size for already present volume group. +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a volume group datavg community.general.aix_lvg: vg: datavg @@ -79,9 +83,9 @@ EXAMPLES = r''' vg: rootvg pvs: hdisk1 state: absent -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/aix_lvol.py b/plugins/modules/aix_lvol.py index 6219bdb8e3..53679fb48d 100644 --- a/plugins/modules/aix_lvol.py +++ b/plugins/modules/aix_lvol.py @@ -1,77 +1,81 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2016, Alain Dejoux # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" author: - - Alain Dejoux (@adejoux) + - Alain Dejoux (@adejoux) module: aix_lvol short_description: Configure AIX LVM logical volumes description: - This module creates, removes or resizes AIX logical volumes. Inspired by lvol module. 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: vg: description: - - The volume group this logical volume is part of. + - The volume group this logical volume is part of. type: str required: true lv: description: - - The name of the logical volume. + - The name of the logical volume. type: str required: true lv_type: description: - - The type of the logical volume. + - The type of the logical volume. type: str default: jfs2 size: description: - - The size of the logical volume with one of the [MGT] units. + - The size of the logical volume with one of the [MGT] units. type: str copies: description: - - The number of copies of the logical volume. - - Maximum copies are 3. + - The number of copies of the logical volume. + - Maximum copies are 3. type: int default: 1 policy: description: - - Sets the interphysical volume allocation policy. - - C(maximum) allocates logical partitions across the maximum number of physical volumes. - - C(minimum) allocates logical partitions across the minimum number of physical volumes. + - Sets the interphysical volume allocation policy. + - V(maximum) allocates logical partitions across the maximum number of physical volumes. + - V(minimum) allocates logical partitions across the minimum number of physical volumes. type: str - choices: [ maximum, minimum ] + choices: [maximum, minimum] default: maximum state: description: - - Control if the logical volume exists. If C(present) and the - volume does not already exist then the C(size) option is required. + - Control if the logical volume exists. If V(present) and the volume does not already exist then the O(size) option + is required. type: str - choices: [ absent, present ] + choices: [absent, present] default: present opts: description: - - Free-form options to be passed to the mklv command. + - Free-form options to be passed to the mklv command. 
type: str default: '' pvs: description: - - A list of physical volumes e.g. C(hdisk1,hdisk2). + - A list of physical volumes, for example V(hdisk1,hdisk2). type: list elements: str default: [] -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a logical volume of 512M community.general.aix_lvol: vg: testvg @@ -83,7 +87,7 @@ EXAMPLES = r''' vg: testvg lv: test2lv size: 512M - pvs: [ hdisk1, hdisk2 ] + pvs: [hdisk1, hdisk2] - name: Create a logical volume of 512M mirrored community.general.aix_lvol: @@ -117,15 +121,15 @@ EXAMPLES = r''' vg: testvg lv: testlv state: absent -''' +""" -RETURN = r''' +RETURN = r""" msg: type: str description: A friendly message describing the task result. returned: always sample: Logical volume testlv created. -''' +""" import re @@ -233,8 +237,6 @@ def main(): state = module.params['state'] pvs = module.params['pvs'] - pv_list = ' '.join(pvs) - if policy == 'maximum': lv_policy = 'x' else: @@ -242,16 +244,16 @@ def main(): # Add echo command when running in check-mode if module.check_mode: - test_opt = 'echo ' + test_opt = [module.get_bin_path("echo", required=True)] else: - test_opt = '' + test_opt = [] # check if system commands are available lsvg_cmd = module.get_bin_path("lsvg", required=True) lslv_cmd = module.get_bin_path("lslv", required=True) # Get information on volume group requested - rc, vg_info, err = module.run_command("%s %s" % (lsvg_cmd, vg)) + rc, vg_info, err = module.run_command([lsvg_cmd, vg]) if rc != 0: if state == 'absent': @@ -266,8 +268,7 @@ def main(): lv_size = round_ppsize(convert_size(module, size), base=this_vg['pp_size']) # Get information on logical volume requested - rc, lv_info, err = module.run_command( - "%s %s" % (lslv_cmd, lv)) + rc, lv_info, err = module.run_command([lslv_cmd, lv]) if rc != 0: if state == 'absent': @@ -289,7 +290,7 @@ def main(): # create LV mklv_cmd = module.get_bin_path("mklv", required=True) - cmd = "%s %s -t %s -y %s -c %s -e %s %s %s %sM %s" % (test_opt, mklv_cmd, 
lv_type, lv, copies, lv_policy, opts, vg, lv_size, pv_list) + cmd = test_opt + [mklv_cmd, "-t", lv_type, "-y", lv, "-c", copies, "-e", lv_policy, opts, vg, "%sM" % (lv_size, )] + pvs rc, out, err = module.run_command(cmd) if rc == 0: module.exit_json(changed=True, msg="Logical volume %s created." % lv) @@ -299,7 +300,7 @@ def main(): if state == 'absent': # remove LV rmlv_cmd = module.get_bin_path("rmlv", required=True) - rc, out, err = module.run_command("%s %s -f %s" % (test_opt, rmlv_cmd, this_lv['name'])) + rc, out, err = module.run_command(test_opt + [rmlv_cmd, "-f", this_lv['name']]) if rc == 0: module.exit_json(changed=True, msg="Logical volume %s deleted." % lv) else: @@ -308,7 +309,7 @@ def main(): if this_lv['policy'] != policy: # change lv allocation policy chlv_cmd = module.get_bin_path("chlv", required=True) - rc, out, err = module.run_command("%s %s -e %s %s" % (test_opt, chlv_cmd, lv_policy, this_lv['name'])) + rc, out, err = module.run_command(test_opt + [chlv_cmd, "-e", lv_policy, this_lv['name']]) if rc == 0: module.exit_json(changed=True, msg="Logical volume %s policy changed: %s." % (lv, policy)) else: @@ -324,7 +325,7 @@ def main(): # resize LV based on absolute values if int(lv_size) > this_lv['size']: extendlv_cmd = module.get_bin_path("extendlv", required=True) - cmd = "%s %s %s %sM" % (test_opt, extendlv_cmd, lv, lv_size - this_lv['size']) + cmd = test_opt + [extendlv_cmd, lv, "%sM" % (lv_size - this_lv['size'], )] rc, out, err = module.run_command(cmd) if rc == 0: module.exit_json(changed=True, msg="Logical volume %s size extended to %sMB." 
% (lv, lv_size)) diff --git a/plugins/modules/alerta_customer.py b/plugins/modules/alerta_customer.py index b9bfd4b265..aec3923206 100644 --- a/plugins/modules/alerta_customer.py +++ b/plugins/modules/alerta_customer.py @@ -1,15 +1,12 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2022, Christian Wollinger <@cwollinger> # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: alerta_customer short_description: Manage customers in Alerta version_added: 4.8.0 @@ -18,8 +15,15 @@ description: author: Christian Wollinger (@cwollinger) seealso: - name: API documentation - description: Documentation for Alerta API + description: Documentation for Alerta API. link: https://docs.alerta.io/api/reference.html#customers +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: customer: description: @@ -51,13 +55,13 @@ options: state: description: - Whether the customer should exist or not. - - Both I(customer) and I(match) identify a customer that should be added or removed. + - Both O(customer) and O(match) identify a customer that should be added or removed. type: str - choices: [ absent, present ] + choices: [absent, present] default: present -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Create customer community.general.alerta_customer: alerta_url: https://alerta.example.com @@ -76,7 +80,7 @@ EXAMPLES = """ state: absent """ -RETURN = """ +RETURN = r""" msg: description: - Success or failure message. 
diff --git a/plugins/modules/ali_instance.py b/plugins/modules/ali_instance.py index 96a042f5ca..0434f0d79f 100644 --- a/plugins/modules/ali_instance.py +++ b/plugins/modules/ali_instance.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -20,241 +19,241 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see http://www.gnu.org/licenses/. -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ali_instance -short_description: Create, Start, Stop, Restart or Terminate an Instance in ECS; Add or Remove Instance to/from a Security Group +short_description: Create, Start, Stop, Restart or Terminate an Instance in ECS; Add or Remove Instance to/from a Security + Group description: - - Create, start, stop, restart, modify or terminate ecs instances. - - Add or remove ecs instances to/from security group. + - Create, start, stop, restart, modify or terminate ECS instances. + - Add or remove ecs instances to/from security group. +attributes: + check_mode: + support: none + diff_mode: + support: none options: - state: - description: - - The state of the instance after operating. - default: 'present' - choices: ['present', 'running', 'stopped', 'restarted', 'absent'] - type: str - availability_zone: - description: - - Aliyun availability zone ID in which to launch the instance. - If it is not specified, it will be allocated by system automatically. - aliases: ['alicloud_zone', 'zone_id'] - type: str - image_id: - description: - - Image ID used to launch instances. Required when I(state=present) and creating new ECS instances. 
- aliases: ['image'] - type: str - instance_type: - description: - - Instance type used to launch instances. Required when I(state=present) and creating new ECS instances. - aliases: ['type'] - type: str - security_groups: - description: - - A list of security group IDs. - aliases: ['group_ids'] - type: list - elements: str - vswitch_id: - description: - - The subnet ID in which to launch the instances (VPC). - aliases: ['subnet_id'] - type: str - instance_name: - description: - - The name of ECS instance, which is a string of 2 to 128 Chinese or English characters. It must begin with an - uppercase/lowercase letter or a Chinese character and can contain numerals, ".", "_" or "-". - It cannot begin with http:// or https://. - aliases: ['name'] - type: str + state: description: - description: - - The description of ECS instance, which is a string of 2 to 256 characters. It cannot begin with http:// or https://. - type: str - internet_charge_type: - description: - - Internet charge type of ECS instance. - default: 'PayByBandwidth' - choices: ['PayByBandwidth', 'PayByTraffic'] - type: str - max_bandwidth_in: - description: - - Maximum incoming bandwidth from the public network, measured in Mbps (Megabits per second). - default: 200 - type: int - max_bandwidth_out: - description: - - Maximum outgoing bandwidth to the public network, measured in Mbps (Megabits per second). - Required when I(allocate_public_ip=true). Ignored when I(allocate_public_ip=false). - default: 0 - type: int - host_name: - description: - - Instance host name. Ordered hostname is not supported. - type: str - unique_suffix: - description: - - Specifies whether to add sequential suffixes to the host_name. - The sequential suffix ranges from 001 to 999. - default: false - type: bool - version_added: '0.2.0' - password: - description: - - The password to login instance. After rebooting instances, modified password will take effect. 
- type: str - system_disk_category: - description: - - Category of the system disk. - default: 'cloud_efficiency' - choices: ['cloud_efficiency', 'cloud_ssd'] - type: str - system_disk_size: - description: - - Size of the system disk, in GB. The valid values are 40~500. - default: 40 - type: int - system_disk_name: - description: - - Name of the system disk. - type: str - system_disk_description: - description: - - Description of the system disk. - type: str - count: - description: - - The number of the new instance. An integer value which indicates how many instances that match I(count_tag) - should be running. Instances are either created or terminated based on this value. - default: 1 - type: int - count_tag: - description: - - I(count) determines how many instances based on a specific tag criteria should be present. - This can be expressed in multiple ways and is shown in the EXAMPLES section. - The specified count_tag must already exist or be passed in as the I(tags) option. - If it is not specified, it will be replaced by I(instance_name). - type: str - allocate_public_ip: - description: - - Whether allocate a public ip for the new instance. - default: false - aliases: [ 'assign_public_ip' ] - type: bool - instance_charge_type: - description: - - The charge type of the instance. - choices: ['PrePaid', 'PostPaid'] - default: 'PostPaid' - type: str - period: - description: - - The charge duration of the instance, in months. Required when I(instance_charge_type=PrePaid). - - The valid value are [1-9, 12, 24, 36]. - default: 1 - type: int - auto_renew: - description: - - Whether automate renew the charge of the instance. - type: bool - default: false - auto_renew_period: - description: - - The duration of the automatic renew the charge of the instance. Required when I(auto_renew=true). - choices: [1, 2, 3, 6, 12] - type: int - instance_ids: - description: - - A list of instance ids. It is required when need to operate existing instances. 
- If it is specified, I(count) will lose efficacy. - type: list - elements: str - force: - description: - - Whether the current operation needs to be execute forcibly. - default: false - type: bool - tags: - description: - - A hash/dictionaries of instance tags, to add to the new instance or for starting/stopping instance by tag. C({"key":"value"}) - aliases: ["instance_tags"] - type: dict - version_added: '0.2.0' - purge_tags: - description: - - Delete any tags not specified in the task that are on the instance. - If True, it means you have to specify all the desired tags on each task affecting an instance. - default: false - type: bool - version_added: '0.2.0' - key_name: - description: - - The name of key pair which is used to access ECS instance in SSH. - required: false - type: str - aliases: ['keypair'] - user_data: - description: - - User-defined data to customize the startup behaviors of an ECS instance and to pass data into an ECS instance. - It only will take effect when launching the new ECS instances. - required: false - type: str - ram_role_name: - description: - - The name of the instance RAM role. - type: str - version_added: '0.2.0' - spot_price_limit: - description: - - The maximum hourly price for the preemptible instance. This parameter supports a maximum of three decimal - places and takes effect when the SpotStrategy parameter is set to SpotWithPriceLimit. - type: float - version_added: '0.2.0' - spot_strategy: - description: - - The bidding mode of the pay-as-you-go instance. This parameter is valid when InstanceChargeType is set to PostPaid. - choices: ['NoSpot', 'SpotWithPriceLimit', 'SpotAsPriceGo'] - default: 'NoSpot' - type: str - version_added: '0.2.0' - period_unit: - description: - - The duration unit that you will buy the resource. It is valid when I(instance_charge_type=PrePaid). 
- choices: ['Month', 'Week'] - default: 'Month' - type: str - version_added: '0.2.0' - dry_run: - description: - - Specifies whether to send a dry-run request. - - If I(dry_run=true), Only a dry-run request is sent and no instance is created. The system checks whether the - required parameters are set, and validates the request format, service permissions, and available ECS instances. - If the validation fails, the corresponding error code is returned. If the validation succeeds, the DryRunOperation error code is returned. - - If I(dry_run=false), A request is sent. If the validation succeeds, the instance is created. - default: false - type: bool - version_added: '0.2.0' - include_data_disks: - description: - - Whether to change instance disks charge type when changing instance charge type. - default: true - type: bool - version_added: '0.2.0' + - The state of the instance after operating. + default: 'present' + choices: ['present', 'running', 'stopped', 'restarted', 'absent'] + type: str + availability_zone: + description: + - Aliyun availability zone ID in which to launch the instance. If it is not specified, it is allocated by system automatically. + aliases: ['alicloud_zone', 'zone_id'] + type: str + image_id: + description: + - Image ID used to launch instances. Required when O(state=present) and creating new ECS instances. + aliases: ['image'] + type: str + instance_type: + description: + - Instance type used to launch instances. Required when O(state=present) and creating new ECS instances. + aliases: ['type'] + type: str + security_groups: + description: + - A list of security group IDs. + aliases: ['group_ids'] + type: list + elements: str + vswitch_id: + description: + - The subnet ID in which to launch the instances (VPC). + aliases: ['subnet_id'] + type: str + instance_name: + description: + - The name of ECS instance, which is a string of 2 to 128 Chinese or English characters. 
It must begin with an uppercase/lowercase + letter or a Chinese character and can contain numerals, V(.), V(_) or V(-). It cannot begin with V(http://) or V(https://). + aliases: ['name'] + type: str + description: + description: + - The description of ECS instance, which is a string of 2 to 256 characters. It cannot begin with V(http://) or V(https://). + type: str + internet_charge_type: + description: + - Internet charge type of ECS instance. + default: 'PayByBandwidth' + choices: ['PayByBandwidth', 'PayByTraffic'] + type: str + max_bandwidth_in: + description: + - Maximum incoming bandwidth from the public network, measured in Mbps (Megabits per second). + default: 200 + type: int + max_bandwidth_out: + description: + - Maximum outgoing bandwidth to the public network, measured in Mbps (Megabits per second). Required when O(allocate_public_ip=true). + Ignored when O(allocate_public_ip=false). + default: 0 + type: int + host_name: + description: + - Instance host name. Ordered hostname is not supported. + type: str + unique_suffix: + description: + - Specifies whether to add sequential suffixes to the host_name. The sequential suffix ranges from 001 to 999. + default: false + type: bool + version_added: '0.2.0' + password: + description: + - The password to login instance. After rebooting instances, modified password is effective. + type: str + system_disk_category: + description: + - Category of the system disk. + default: 'cloud_efficiency' + choices: ['cloud_efficiency', 'cloud_ssd'] + type: str + system_disk_size: + description: + - Size of the system disk, in GB. The valid values are V(40)~V(500). + default: 40 + type: int + system_disk_name: + description: + - Name of the system disk. + type: str + system_disk_description: + description: + - Description of the system disk. + type: str + count: + description: + - The number of the new instance. An integer value which indicates how many instances that match O(count_tag) should + be running. 
Instances are either created or terminated based on this value. + default: 1 + type: int + count_tag: + description: + - O(count) determines how many instances based on a specific tag criteria should be present. This can be expressed in + multiple ways and is shown in the EXAMPLES section. The specified count_tag must already exist or be passed in as + the O(tags) option. If it is not specified, it is replaced by O(instance_name). + type: str + allocate_public_ip: + description: + - Whether allocate a public IP for the new instance. + default: false + aliases: ['assign_public_ip'] + type: bool + instance_charge_type: + description: + - The charge type of the instance. + choices: ['PrePaid', 'PostPaid'] + default: 'PostPaid' + type: str + period: + description: + - The charge duration of the instance, in months. Required when O(instance_charge_type=PrePaid). + - The valid value are [V(1-9), V(12), V(24), V(36)]. + default: 1 + type: int + auto_renew: + description: + - Whether automate renew the charge of the instance. + type: bool + default: false + auto_renew_period: + description: + - The duration of the automatic renew the charge of the instance. Required when O(auto_renew=true). + choices: [1, 2, 3, 6, 12] + type: int + instance_ids: + description: + - A list of instance IDs. It is required when need to operate existing instances. If it is specified, O(count) is ignored. + type: list + elements: str + force: + description: + - Whether the current operation needs to be execute forcibly. + default: false + type: bool + tags: + description: + - A hash/dictionaries of instance tags, to add to the new instance or for starting/stopping instance by tag. V({"key":"value"}). + aliases: ["instance_tags"] + type: dict + version_added: '0.2.0' + purge_tags: + description: + - Delete any tags not specified in the task that are on the instance. If V(true), it means you have to specify all the + desired tags on each task affecting an instance. 
+ default: false + type: bool + version_added: '0.2.0' + key_name: + description: + - The name of key pair which is used to access ECS instance in SSH. + required: false + type: str + aliases: ['keypair'] + user_data: + description: + - User-defined data to customize the startup behaviors of an ECS instance and to pass data into an ECS instance. It + only takes effect when launching the new ECS instances. + required: false + type: str + ram_role_name: + description: + - The name of the instance RAM role. + type: str + version_added: '0.2.0' + spot_price_limit: + description: + - The maximum hourly price for the preemptible instance. This parameter supports a maximum of three decimal places and + takes effect when the SpotStrategy parameter is set to SpotWithPriceLimit. + type: float + version_added: '0.2.0' + spot_strategy: + description: + - The bidding mode of the pay-as-you-go instance. This parameter is valid when O(instance_charge_type=PostPaid). + choices: ['NoSpot', 'SpotWithPriceLimit', 'SpotAsPriceGo'] + default: 'NoSpot' + type: str + version_added: '0.2.0' + period_unit: + description: + - The duration unit that you are buying the resource. It is valid when O(instance_charge_type=PrePaid). + choices: ['Month', 'Week'] + default: 'Month' + type: str + version_added: '0.2.0' + dry_run: + description: + - Specifies whether to send a dry-run request. + - If O(dry_run=true), Only a dry-run request is sent and no instance is created. The system checks whether the required + parameters are set, and validates the request format, service permissions, and available ECS instances. If the validation + fails, the corresponding error code is returned. If the validation succeeds, the DryRunOperation error code is returned. + - If O(dry_run=false), a request is sent. If the validation succeeds, the instance is created. 
+ default: false + type: bool + version_added: '0.2.0' + include_data_disks: + description: + - Whether to change instance disks charge type when changing instance charge type. + default: true + type: bool + version_added: '0.2.0' author: - - "He Guimin (@xiaozhu36)" + - "He Guimin (@xiaozhu36)" requirements: - - "python >= 3.6" - - "footmark >= 1.19.0" + - "Python >= 3.6" + - "footmark >= 1.19.0" extends_documentation_fragment: - - community.general.alicloud -''' + - community.general.alicloud + - community.general.attributes +""" -EXAMPLES = ''' +EXAMPLES = r""" # basic provisioning example vpc network - name: Basic provisioning example hosts: localhost @@ -292,7 +291,7 @@ EXAMPLES = ''' internet_charge_type: '{{ internet_charge_type }}' max_bandwidth_out: '{{ max_bandwidth_out }}' tags: - Name: created_one + Name: created_one host_name: '{{ host_name }}' password: '{{ password }}' @@ -310,11 +309,11 @@ EXAMPLES = ''' internet_charge_type: '{{ internet_charge_type }}' max_bandwidth_out: '{{ max_bandwidth_out }}' tags: - Name: created_one - Version: 0.1 + Name: created_one + Version: 0.1 count: 2 count_tag: - Name: created_one + Name: created_one host_name: '{{ host_name }}' password: '{{ password }}' @@ -342,278 +341,278 @@ EXAMPLES = ''' alicloud_region: '{{ alicloud_region }}' instance_ids: '{{ instance_ids }}' security_groups: '{{ security_groups }}' -''' +""" -RETURN = ''' +RETURN = r""" instances: - description: List of ECS instances - returned: always - type: complex - contains: - availability_zone: - description: The availability zone of the instance is in. - returned: always - type: str - sample: cn-beijing-a - block_device_mappings: - description: Any block device mapping entries for the instance. - returned: always - type: complex - contains: - device_name: - description: The device name exposed to the instance (for example, /dev/xvda). 
- returned: always - type: str - sample: /dev/xvda - attach_time: - description: The time stamp when the attachment initiated. - returned: always - type: str - sample: "2018-06-25T04:08:26Z" - delete_on_termination: - description: Indicates whether the volume is deleted on instance termination. - returned: always - type: bool - sample: true - status: - description: The attachment state. - returned: always - type: str - sample: in_use - volume_id: - description: The ID of the cloud disk. - returned: always - type: str - sample: d-2zei53pjsi117y6gf9t6 - cpu: - description: The CPU core count of the instance. - returned: always - type: int - sample: 4 - creation_time: - description: The time the instance was created. - returned: always - type: str - sample: "2018-06-25T04:08Z" - description: - description: The instance description. - returned: always - type: str - sample: "my ansible instance" - eip: - description: The attribution of EIP associated with the instance. - returned: always - type: complex - contains: - allocation_id: - description: The ID of the EIP. - returned: always - type: str - sample: eip-12345 - internet_charge_type: - description: The internet charge type of the EIP. - returned: always - type: str - sample: "paybybandwidth" - ip_address: - description: EIP address. - returned: always - type: str - sample: 42.10.2.2 - expired_time: - description: The time the instance will expire. - returned: always - type: str - sample: "2099-12-31T15:59Z" - gpu: - description: The attribution of instance GPU. - returned: always - type: complex - contains: - amount: - description: The count of the GPU. - returned: always - type: int - sample: 0 - spec: - description: The specification of the GPU. - returned: always - type: str - sample: "" - host_name: - description: The host name of the instance. - returned: always - type: str - sample: iZ2zewaoZ - id: - description: Alias of instance_id. 
- returned: always - type: str - sample: i-abc12345 - instance_id: - description: ECS instance resource ID. - returned: always - type: str - sample: i-abc12345 - image_id: - description: The ID of the image used to launch the instance. - returned: always - type: str - sample: m-0011223344 - inner_ip_address: - description: The inner IPv4 address of the classic instance. - returned: always - type: str - sample: 10.0.0.2 - instance_charge_type: - description: The instance charge type. - returned: always - type: str - sample: PostPaid - instance_name: - description: The name of the instance. - returned: always - type: str - sample: my-ecs - instance_type: - description: The instance type of the running instance. - returned: always - type: str - sample: ecs.sn1ne.xlarge - instance_type_family: - description: The instance type family of the instance belongs. - returned: always - type: str - sample: ecs.sn1ne - internet_charge_type: - description: The billing method of the network bandwidth. - returned: always - type: str - sample: PayByBandwidth - internet_max_bandwidth_in: - description: Maximum incoming bandwidth from the internet network. - returned: always - type: int - sample: 200 - internet_max_bandwidth_out: - description: Maximum incoming bandwidth from the internet network. - returned: always - type: int - sample: 20 - io_optimized: - description: Indicates whether the instance is optimized for EBS I/O. - returned: always - type: bool - sample: false - memory: - description: Memory size of the instance. - returned: always - type: int - sample: 8192 - network_interfaces: - description: One or more network interfaces for the instance. - returned: always - type: complex - contains: - mac_address: - description: The MAC address. - returned: always - type: str - sample: "00:11:22:33:44:55" - network_interface_id: - description: The ID of the network interface. 
- returned: always - type: str - sample: eni-01234567 - primary_ip_address: - description: The primary IPv4 address of the network interface within the vswitch. - returned: always - type: str - sample: 10.0.0.1 - osname: - description: The operation system name of the instance owned. - returned: always - type: str - sample: CentOS - ostype: - description: The operation system type of the instance owned. - returned: always - type: str - sample: linux - private_ip_address: - description: The IPv4 address of the network interface within the subnet. - returned: always - type: str - sample: 10.0.0.1 - public_ip_address: - description: The public IPv4 address assigned to the instance or eip address - returned: always - type: str - sample: 43.0.0.1 - resource_group_id: - description: The id of the resource group to which the instance belongs. - returned: always - type: str - sample: my-ecs-group - security_groups: - description: One or more security groups for the instance. - returned: always - type: list - elements: dict - contains: - group_id: - description: The ID of the security group. - returned: always - type: str - sample: sg-0123456 - group_name: - description: The name of the security group. - returned: always - type: str - sample: my-security-group - status: - description: The current status of the instance. - returned: always - type: str - sample: running - tags: - description: Any tags assigned to the instance. - returned: always - type: dict - sample: - user_data: - description: User-defined data. - returned: always - type: dict - sample: - vswitch_id: - description: The ID of the vswitch in which the instance is running. - returned: always - type: str - sample: vsw-dew00abcdef - vpc_id: - description: The ID of the VPC the instance is in. - returned: always - type: str - sample: vpc-0011223344 - spot_price_limit: - description: - - The maximum hourly price for the preemptible instance. 
- returned: always - type: float - sample: 0.97 - spot_strategy: - description: - - The bidding mode of the pay-as-you-go instance. + description: List of ECS instances. + returned: always + type: complex + contains: + availability_zone: + description: The availability zone of the instance is in. + returned: always + type: str + sample: cn-beijing-a + block_device_mappings: + description: Any block device mapping entries for the instance. + returned: always + type: complex + contains: + device_name: + description: The device name exposed to the instance. returned: always type: str - sample: NoSpot + sample: /dev/xvda + attach_time: + description: The time stamp when the attachment initiated. + returned: always + type: str + sample: "2018-06-25T04:08:26Z" + delete_on_termination: + description: Indicates whether the volume is deleted on instance termination. + returned: always + type: bool + sample: true + status: + description: The attachment state. + returned: always + type: str + sample: in_use + volume_id: + description: The ID of the cloud disk. + returned: always + type: str + sample: d-2zei53pjsi117y6gf9t6 + cpu: + description: The CPU core count of the instance. + returned: always + type: int + sample: 4 + creation_time: + description: The time the instance was created. + returned: always + type: str + sample: "2018-06-25T04:08Z" + description: + description: The instance description. + returned: always + type: str + sample: "my ansible instance" + eip: + description: The attribution of EIP associated with the instance. + returned: always + type: complex + contains: + allocation_id: + description: The ID of the EIP. + returned: always + type: str + sample: eip-12345 + internet_charge_type: + description: The internet charge type of the EIP. + returned: always + type: str + sample: "paybybandwidth" + ip_address: + description: EIP address. + returned: always + type: str + sample: 42.10.2.2 + expired_time: + description: The time the instance expires. 
+ returned: always + type: str + sample: "2099-12-31T15:59Z" + gpu: + description: The attribution of instance GPU. + returned: always + type: complex + contains: + amount: + description: The count of the GPU. + returned: always + type: int + sample: 0 + spec: + description: The specification of the GPU. + returned: always + type: str + sample: "" + host_name: + description: The host name of the instance. + returned: always + type: str + sample: iZ2zewaoZ + id: + description: Alias of instance_id. + returned: always + type: str + sample: i-abc12345 + instance_id: + description: ECS instance resource ID. + returned: always + type: str + sample: i-abc12345 + image_id: + description: The ID of the image used to launch the instance. + returned: always + type: str + sample: m-0011223344 + inner_ip_address: + description: The inner IPv4 address of the classic instance. + returned: always + type: str + sample: 10.0.0.2 + instance_charge_type: + description: The instance charge type. + returned: always + type: str + sample: PostPaid + instance_name: + description: The name of the instance. + returned: always + type: str + sample: my-ecs + instance_type: + description: The instance type of the running instance. + returned: always + type: str + sample: ecs.sn1ne.xlarge + instance_type_family: + description: The instance type family of the instance belongs. + returned: always + type: str + sample: ecs.sn1ne + internet_charge_type: + description: The billing method of the network bandwidth. + returned: always + type: str + sample: PayByBandwidth + internet_max_bandwidth_in: + description: Maximum incoming bandwidth from the internet network. + returned: always + type: int + sample: 200 + internet_max_bandwidth_out: + description: Maximum incoming bandwidth from the internet network. + returned: always + type: int + sample: 20 + io_optimized: + description: Indicates whether the instance is optimized for EBS I/O. 
+ returned: always + type: bool + sample: false + memory: + description: Memory size of the instance. + returned: always + type: int + sample: 8192 + network_interfaces: + description: One or more network interfaces for the instance. + returned: always + type: complex + contains: + mac_address: + description: The MAC address. + returned: always + type: str + sample: "00:11:22:33:44:55" + network_interface_id: + description: The ID of the network interface. + returned: always + type: str + sample: eni-01234567 + primary_ip_address: + description: The primary IPv4 address of the network interface within the vswitch. + returned: always + type: str + sample: 10.0.0.1 + osname: + description: The operation system name of the instance owned. + returned: always + type: str + sample: CentOS + ostype: + description: The operation system type of the instance owned. + returned: always + type: str + sample: linux + private_ip_address: + description: The IPv4 address of the network interface within the subnet. + returned: always + type: str + sample: 10.0.0.1 + public_ip_address: + description: The public IPv4 address assigned to the instance or eip address. + returned: always + type: str + sample: 43.0.0.1 + resource_group_id: + description: The ID of the resource group to which the instance belongs. + returned: always + type: str + sample: my-ecs-group + security_groups: + description: One or more security groups for the instance. + returned: always + type: list + elements: dict + contains: + group_id: + description: The ID of the security group. + returned: always + type: str + sample: sg-0123456 + group_name: + description: The name of the security group. + returned: always + type: str + sample: my-security-group + status: + description: The current status of the instance. + returned: always + type: str + sample: running + tags: + description: Any tags assigned to the instance. + returned: always + type: dict + sample: + user_data: + description: User-defined data. 
+ returned: always + type: dict + sample: + vswitch_id: + description: The ID of the vswitch in which the instance is running. + returned: always + type: str + sample: vsw-dew00abcdef + vpc_id: + description: The ID of the VPC the instance is in. + returned: always + type: str + sample: vpc-0011223344 + spot_price_limit: + description: + - The maximum hourly price for the preemptible instance. + returned: always + type: float + sample: 0.97 + spot_strategy: + description: + - The bidding mode of the pay-as-you-go instance. + returned: always + type: str + sample: NoSpot ids: - description: List of ECS instance IDs - returned: always - type: list - sample: [i-12345er, i-3245fs] -''' + description: List of ECS instance IDs. + returned: always + type: list + sample: ["i-12345er", "i-3245fs"] +""" import re import time diff --git a/plugins/modules/ali_instance_info.py b/plugins/modules/ali_instance_info.py index f489f96372..31550c4d0a 100644 --- a/plugins/modules/ali_instance_info.py +++ b/plugins/modules/ali_instance_info.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -20,51 +19,51 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see http://www.gnu.org/licenses/. -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ali_instance_info short_description: Gather information on instances of Alibaba Cloud ECS description: - - This module fetches data from the Open API in Alicloud. - The module must be called from within the ECS instance itself. - - This module was called C(ali_instance_facts) before Ansible 2.9. The usage did not change. + - This module fetches data from the Open API in Alicloud. 
The module must be called from within the ECS instance itself. +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: - name_prefix: - description: - - Use a instance name prefix to filter ecs instances. - type: str - version_added: '0.2.0' - tags: - description: - - A hash/dictionaries of instance tags. C({"key":"value"}) - aliases: ["instance_tags"] - type: dict - filters: - description: - - A dict of filters to apply. Each dict item consists of a filter key and a filter value. The filter keys can be - all of request parameters. See U(https://www.alibabacloud.com/help/doc-detail/25506.htm) for parameter details. - Filter keys can be same as request parameter name or be lower case and use underscore ("_") or dash ("-") to - connect different words in one parameter. 'InstanceIds' should be a list. - 'Tag.n.Key' and 'Tag.n.Value' should be a dict and using I(tags) instead. - type: dict - version_added: '0.2.0' + name_prefix: + description: + - Use a instance name prefix to filter ECS instances. + type: str + version_added: '0.2.0' + tags: + description: + - A hash/dictionaries of instance tags. C({"key":"value"}). + aliases: ["instance_tags"] + type: dict + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. The filter keys can be all + of request parameters. See U(https://www.alibabacloud.com/help/doc-detail/25506.htm) for parameter details. Filter + keys can be same as request parameter name or be lower case and use underscore (V("_")) or dash (V("-")) to connect + different words in one parameter. C(InstanceIds) should be a list. C(Tag.n.Key) and C(Tag.n.Value) should be a dict + and using O(tags) instead. 
+ type: dict + version_added: '0.2.0' author: - - "He Guimin (@xiaozhu36)" + - "He Guimin (@xiaozhu36)" requirements: - - "python >= 3.6" - - "footmark >= 1.13.0" + - "Python >= 3.6" + - "footmark >= 1.13.0" extends_documentation_fragment: - - community.general.alicloud - - community.general.attributes - - community.general.attributes.info_module -''' + - community.general.alicloud + - community.general.attributes + - community.general.attributes.info_module +""" -EXAMPLES = ''' +EXAMPLES = r""" # Fetch instances details according to setting different filters - name: Find all instances in the specified region @@ -87,261 +86,261 @@ EXAMPLES = ''' community.general.ali_instance_info: tags: Test: "add" -''' +""" -RETURN = ''' +RETURN = r""" instances: - description: List of ECS instances - returned: always - type: complex - contains: - availability_zone: - description: The availability zone of the instance is in. - returned: always - type: str - sample: cn-beijing-a - block_device_mappings: - description: Any block device mapping entries for the instance. - returned: always - type: complex - contains: - device_name: - description: The device name exposed to the instance (for example, /dev/xvda). - returned: always - type: str - sample: /dev/xvda - attach_time: - description: The time stamp when the attachment initiated. - returned: always - type: str - sample: "2018-06-25T04:08:26Z" - delete_on_termination: - description: Indicates whether the volume is deleted on instance termination. - returned: always - type: bool - sample: true - status: - description: The attachment state. - returned: always - type: str - sample: in_use - volume_id: - description: The ID of the cloud disk. - returned: always - type: str - sample: d-2zei53pjsi117y6gf9t6 - cpu: - description: The CPU core count of the instance. - returned: always - type: int - sample: 4 - creation_time: - description: The time the instance was created. 
- returned: always - type: str - sample: "2018-06-25T04:08Z" - description: - description: The instance description. - returned: always - type: str - sample: "my ansible instance" - eip: - description: The attribution of EIP associated with the instance. - returned: always - type: complex - contains: - allocation_id: - description: The ID of the EIP. - returned: always - type: str - sample: eip-12345 - internet_charge_type: - description: The internet charge type of the EIP. - returned: always - type: str - sample: "paybybandwidth" - ip_address: - description: EIP address. - returned: always - type: str - sample: 42.10.2.2 - expired_time: - description: The time the instance will expire. - returned: always - type: str - sample: "2099-12-31T15:59Z" - gpu: - description: The attribution of instance GPU. - returned: always - type: complex - contains: - amount: - description: The count of the GPU. - returned: always - type: int - sample: 0 - spec: - description: The specification of the GPU. - returned: always - type: str - sample: "" - host_name: - description: The host name of the instance. - returned: always - type: str - sample: iZ2zewaoZ - id: - description: Alias of instance_id. - returned: always - type: str - sample: i-abc12345 - instance_id: - description: ECS instance resource ID. - returned: always - type: str - sample: i-abc12345 - image_id: - description: The ID of the image used to launch the instance. - returned: always - type: str - sample: m-0011223344 - inner_ip_address: - description: The inner IPv4 address of the classic instance. - returned: always - type: str - sample: 10.0.0.2 - instance_charge_type: - description: The instance charge type. - returned: always - type: str - sample: PostPaid - instance_name: - description: The name of the instance. - returned: always - type: str - sample: my-ecs - instance_type_family: - description: The instance type family of the instance belongs. 
- returned: always - type: str - sample: ecs.sn1ne - instance_type: - description: The instance type of the running instance. - returned: always - type: str - sample: ecs.sn1ne.xlarge - internet_charge_type: - description: The billing method of the network bandwidth. - returned: always - type: str - sample: PayByBandwidth - internet_max_bandwidth_in: - description: Maximum incoming bandwidth from the internet network. - returned: always - type: int - sample: 200 - internet_max_bandwidth_out: - description: Maximum incoming bandwidth from the internet network. - returned: always - type: int - sample: 20 - io_optimized: - description: Indicates whether the instance is optimized for EBS I/O. - returned: always - type: bool - sample: false - memory: - description: Memory size of the instance. - returned: always - type: int - sample: 8192 - network_interfaces: - description: One or more network interfaces for the instance. - returned: always - type: complex - contains: - mac_address: - description: The MAC address. - returned: always - type: str - sample: "00:11:22:33:44:55" - network_interface_id: - description: The ID of the network interface. - returned: always - type: str - sample: eni-01234567 - primary_ip_address: - description: The primary IPv4 address of the network interface within the vswitch. - returned: always - type: str - sample: 10.0.0.1 - osname: - description: The operation system name of the instance owned. - returned: always - type: str - sample: CentOS - ostype: - description: The operation system type of the instance owned. - returned: always - type: str - sample: linux - private_ip_address: - description: The IPv4 address of the network interface within the subnet. 
- returned: always - type: str - sample: 10.0.0.1 - public_ip_address: - description: The public IPv4 address assigned to the instance or eip address - returned: always - type: str - sample: 43.0.0.1 - resource_group_id: - description: The id of the resource group to which the instance belongs. - returned: always - type: str - sample: my-ecs-group - security_groups: - description: One or more security groups for the instance. - returned: always - type: list - elements: dict - contains: - group_id: - description: The ID of the security group. - returned: always - type: str - sample: sg-0123456 - group_name: - description: The name of the security group. - returned: always - type: str - sample: my-security-group + description: List of ECS instances. + returned: always + type: complex + contains: + availability_zone: + description: The availability zone of the instance is in. + returned: always + type: str + sample: cn-beijing-a + block_device_mappings: + description: Any block device mapping entries for the instance. + returned: always + type: complex + contains: + device_name: + description: The device name exposed to the instance (for example, /dev/xvda). + returned: always + type: str + sample: /dev/xvda + attach_time: + description: The time stamp when the attachment initiated. + returned: always + type: str + sample: "2018-06-25T04:08:26Z" + delete_on_termination: + description: Indicates whether the volume is deleted on instance termination. + returned: always + type: bool + sample: true status: - description: The current status of the instance. - returned: always - type: str - sample: running - tags: - description: Any tags assigned to the instance. - returned: always - type: dict - sample: - vswitch_id: - description: The ID of the vswitch in which the instance is running. - returned: always - type: str - sample: vsw-dew00abcdef - vpc_id: - description: The ID of the VPC the instance is in. 
- returned: always - type: str - sample: vpc-0011223344 + description: The attachment state. + returned: always + type: str + sample: in_use + volume_id: + description: The ID of the cloud disk. + returned: always + type: str + sample: d-2zei53pjsi117y6gf9t6 + cpu: + description: The CPU core count of the instance. + returned: always + type: int + sample: 4 + creation_time: + description: The time the instance was created. + returned: always + type: str + sample: "2018-06-25T04:08Z" + description: + description: The instance description. + returned: always + type: str + sample: "my ansible instance" + eip: + description: The attribution of EIP associated with the instance. + returned: always + type: complex + contains: + allocation_id: + description: The ID of the EIP. + returned: always + type: str + sample: eip-12345 + internet_charge_type: + description: The internet charge type of the EIP. + returned: always + type: str + sample: "paybybandwidth" + ip_address: + description: EIP address. + returned: always + type: str + sample: 42.10.2.2 + expired_time: + description: The time the instance expires. + returned: always + type: str + sample: "2099-12-31T15:59Z" + gpu: + description: The attribution of instance GPU. + returned: always + type: complex + contains: + amount: + description: The count of the GPU. + returned: always + type: int + sample: 0 + spec: + description: The specification of the GPU. + returned: always + type: str + sample: "" + host_name: + description: The host name of the instance. + returned: always + type: str + sample: iZ2zewaoZ + id: + description: Alias of instance_id. + returned: always + type: str + sample: i-abc12345 + instance_id: + description: ECS instance resource ID. + returned: always + type: str + sample: i-abc12345 + image_id: + description: The ID of the image used to launch the instance. + returned: always + type: str + sample: m-0011223344 + inner_ip_address: + description: The inner IPv4 address of the classic instance. 
+ returned: always + type: str + sample: 10.0.0.2 + instance_charge_type: + description: The instance charge type. + returned: always + type: str + sample: PostPaid + instance_name: + description: The name of the instance. + returned: always + type: str + sample: my-ecs + instance_type_family: + description: The instance type family of the instance belongs. + returned: always + type: str + sample: ecs.sn1ne + instance_type: + description: The instance type of the running instance. + returned: always + type: str + sample: ecs.sn1ne.xlarge + internet_charge_type: + description: The billing method of the network bandwidth. + returned: always + type: str + sample: PayByBandwidth + internet_max_bandwidth_in: + description: Maximum incoming bandwidth from the internet network. + returned: always + type: int + sample: 200 + internet_max_bandwidth_out: + description: Maximum incoming bandwidth from the internet network. + returned: always + type: int + sample: 20 + io_optimized: + description: Indicates whether the instance is optimized for EBS I/O. + returned: always + type: bool + sample: false + memory: + description: Memory size of the instance. + returned: always + type: int + sample: 8192 + network_interfaces: + description: One or more network interfaces for the instance. + returned: always + type: complex + contains: + mac_address: + description: The MAC address. + returned: always + type: str + sample: "00:11:22:33:44:55" + network_interface_id: + description: The ID of the network interface. + returned: always + type: str + sample: eni-01234567 + primary_ip_address: + description: The primary IPv4 address of the network interface within the vswitch. + returned: always + type: str + sample: 10.0.0.1 + osname: + description: The operation system name of the instance owned. + returned: always + type: str + sample: CentOS + ostype: + description: The operation system type of the instance owned. 
+ returned: always + type: str + sample: linux + private_ip_address: + description: The IPv4 address of the network interface within the subnet. + returned: always + type: str + sample: 10.0.0.1 + public_ip_address: + description: The public IPv4 address assigned to the instance or EIP address. + returned: always + type: str + sample: 43.0.0.1 + resource_group_id: + description: The ID of the resource group to which the instance belongs. + returned: always + type: str + sample: my-ecs-group + security_groups: + description: One or more security groups for the instance. + returned: always + type: list + elements: dict + contains: + group_id: + description: The ID of the security group. + returned: always + type: str + sample: sg-0123456 + group_name: + description: The name of the security group. + returned: always + type: str + sample: my-security-group + status: + description: The current status of the instance. + returned: always + type: str + sample: running + tags: + description: Any tags assigned to the instance. + returned: always + type: dict + sample: + vswitch_id: + description: The ID of the vswitch in which the instance is running. + returned: always + type: str + sample: vsw-dew00abcdef + vpc_id: + description: The ID of the VPC the instance is in. + returned: always + type: str + sample: vpc-0011223344 ids: - description: List of ECS instance IDs - returned: always - type: list - sample: [i-12345er, i-3245fs] -''' + description: List of ECS instance IDs. 
+ returned: always + type: list + sample: ["i-12345er", "i-3245fs"] +""" from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible_collections.community.general.plugins.module_utils.alicloud_ecs import ( diff --git a/plugins/modules/alternatives.py b/plugins/modules/alternatives.py index 48cacb4540..ad26d04578 100644 --- a/plugins/modules/alternatives.py +++ b/plugins/modules/alternatives.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2014, Gabe Mulley # Copyright (c) 2015, David Wittman @@ -7,21 +6,26 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: alternatives short_description: Manages alternative programs for common commands description: - - Manages symbolic links using the 'update-alternatives' tool. - - Useful when multiple programs are installed but provide similar functionality (e.g. different editors). + - Manages symbolic links using the C(update-alternatives) tool. + - Useful when multiple programs are installed but provide similar functionality (for example, different editors). author: - - Marius Rieder (@jiuka) - - David Wittman (@DavidWittman) - - Gabe Mulley (@mulby) + - Marius Rieder (@jiuka) + - David Wittman (@DavidWittman) + - Gabe Mulley (@mulby) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full options: name: description: @@ -32,27 +36,31 @@ options: description: - The path to the real executable that the link should point to. type: path - required: true + family: + description: + - The family groups similar alternatives. This option is available only on RHEL-based distributions. 
+ type: str + version_added: 10.1.0 link: description: - The path to the symbolic link that should point to the real executable. - - This option is always required on RHEL-based distributions. On Debian-based distributions this option is - required when the alternative I(name) is unknown to the system. + - This option is always required on RHEL-based distributions. On Debian-based distributions this option is required + when the alternative O(name) is unknown to the system. type: path priority: description: - - The priority of the alternative. If no priority is given for creation C(50) is used as a fallback. + - The priority of the alternative. If no priority is given for creation V(50) is used as a fallback. type: int state: description: - - C(present) - install the alternative (if not already installed), but do - not set it as the currently selected alternative for the group. - - C(selected) - install the alternative (if not already installed), and - set it as the currently selected alternative for the group. - - C(auto) - install the alternative (if not already installed), and - set the group to auto mode. Added in community.general 5.1.0. - - C(absent) - removes the alternative. Added in community.general 5.1.0. - choices: [ present, selected, auto, absent ] + - V(present) - install the alternative (if not already installed), but do not set it as the currently selected alternative + for the group. + - V(selected) - install the alternative (if not already installed), and set it as the currently selected alternative + for the group. + - V(auto) - install the alternative (if not already installed), and set the group to auto mode. Added in community.general + 5.1.0. + - V(absent) - removes the alternative. Added in community.general 5.1.0. + choices: [present, selected, auto, absent] default: selected type: str version_added: 4.8.0 @@ -60,8 +68,7 @@ options: description: - A list of subcommands. - Each subcommand needs a name, a link and a path parameter. 
- - Subcommands are also named 'slaves' or 'followers', depending on the version - of alternatives. + - Subcommands are also named C(slaves) or C(followers), depending on the version of C(alternatives). type: list elements: dict aliases: ['slaves'] @@ -82,15 +89,21 @@ options: type: path required: true version_added: 5.1.0 -requirements: [ update-alternatives ] -''' +requirements: [update-alternatives] +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Correct java version selected community.general.alternatives: name: java path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java +- name: Select java-11-openjdk.x86_64 family + community.general.alternatives: + name: java + family: java-11-openjdk.x86_64 + when: ansible_os_family == 'RedHat' + - name: Alternatives link created community.general.alternatives: name: hadoop-conf @@ -126,7 +139,7 @@ EXAMPLES = r''' - name: keytool link: /usr/bin/keytool path: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/keytool -''' +""" import os import re @@ -175,17 +188,25 @@ class AlternativesModule(object): subcommands_parameter = self.module.params['subcommands'] priority_parameter = self.module.params['priority'] if ( - self.path not in self.current_alternatives or - (priority_parameter is not None and self.current_alternatives[self.path].get('priority') != priority_parameter) or - (subcommands_parameter is not None and ( - not all(s in subcommands_parameter for s in self.current_alternatives[self.path].get('subcommands')) or - not all(s in self.current_alternatives[self.path].get('subcommands') for s in subcommands_parameter) - )) + self.path is not None and ( + self.path not in self.current_alternatives or + (priority_parameter is not None and self.current_alternatives[self.path].get('priority') != priority_parameter) or + (subcommands_parameter is not None and ( + not all(s in subcommands_parameter for s in self.current_alternatives[self.path].get('subcommands')) or + not all(s in self.current_alternatives[self.path].get('subcommands') 
for s in subcommands_parameter) + )) + ) ): self.install() # Check if we need to set the preference - if self.mode_selected and self.current_path != self.path: + is_same_path = self.path is not None and self.current_path == self.path + is_same_family = False + if self.current_path is not None and self.current_path in self.current_alternatives: + current_alternative = self.current_alternatives[self.current_path] + is_same_family = current_alternative.get('family') == self.family + + if self.mode_selected and not (is_same_path or is_same_family): self.set() # Check if we need to reset to auto @@ -206,6 +227,8 @@ class AlternativesModule(object): self.module.fail_json(msg='Needed to install the alternative, but unable to do so as we are missing the link') cmd = [self.UPDATE_ALTERNATIVES, '--install', self.link, self.name, self.path, str(self.priority)] + if self.family is not None: + cmd.extend(["--family", self.family]) if self.module.params['subcommands'] is not None: subcommands = [['--slave', subcmd['link'], subcmd['name'], subcmd['path']] for subcmd in self.subcommands] @@ -221,6 +244,7 @@ class AlternativesModule(object): self.result['diff']['after'] = dict( state=AlternativeState.PRESENT, path=self.path, + family=self.family, priority=self.priority, link=self.link, ) @@ -241,9 +265,15 @@ class AlternativesModule(object): self.result['diff']['after'] = dict(state=AlternativeState.ABSENT) def set(self): - cmd = [self.UPDATE_ALTERNATIVES, '--set', self.name, self.path] + # Path takes precedence over family as it is more specific + if self.path is None: + arg = self.family + else: + arg = self.path + + cmd = [self.UPDATE_ALTERNATIVES, '--set', self.name, arg] self.result['changed'] = True - self.messages.append("Set alternative '%s' for '%s'." % (self.path, self.name)) + self.messages.append("Set alternative '%s' for '%s'." 
% (arg, self.name)) if not self.module.check_mode: self.module.run_command(cmd, check_rc=True) @@ -270,6 +300,10 @@ class AlternativesModule(object): def path(self): return self.module.params.get('path') + @property + def family(self): + return self.module.params.get('family') + @property def link(self): return self.module.params.get('link') or self.current_link @@ -314,7 +348,7 @@ class AlternativesModule(object): current_link_regex = re.compile(r'^\s*link \w+ is (.*)$', re.MULTILINE) subcmd_path_link_regex = re.compile(r'^\s*(?:slave|follower) (\S+) is (.*)$', re.MULTILINE) - alternative_regex = re.compile(r'^(\/.*)\s-\s(?:family\s\S+\s)?priority\s(\d+)((?:\s+(?:slave|follower).*)*)', re.MULTILINE) + alternative_regex = re.compile(r'^(\/.*)\s-\s(?:family\s(\S+)\s)?priority\s(\d+)((?:\s+(?:slave|follower).*)*)', re.MULTILINE) subcmd_regex = re.compile(r'^\s+(?:slave|follower) (.*): (.*)$', re.MULTILINE) match = current_mode_regex.search(display_output) @@ -337,11 +371,12 @@ class AlternativesModule(object): subcmd_path_map = dict(subcmd_path_link_regex.findall(display_output)) if not subcmd_path_map and self.subcommands: - subcmd_path_map = dict((s['name'], s['link']) for s in self.subcommands) + subcmd_path_map = {s['name']: s['link'] for s in self.subcommands} - for path, prio, subcmd in alternative_regex.findall(display_output): + for path, family, prio, subcmd in alternative_regex.findall(display_output): self.current_alternatives[path] = dict( priority=int(prio), + family=family, subcommands=[dict( name=name, path=spath, @@ -376,7 +411,8 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict(type='str', required=True), - path=dict(type='path', required=True), + path=dict(type='path'), + family=dict(type='str'), link=dict(type='path'), priority=dict(type='int'), state=dict( @@ -391,6 +427,7 @@ def main(): )), ), supports_check_mode=True, + required_one_of=[('path', 'family')] ) AlternativesModule(module) diff --git 
a/plugins/modules/android_sdk.py b/plugins/modules/android_sdk.py new file mode 100644 index 0000000000..35900f39a5 --- /dev/null +++ b/plugins/modules/android_sdk.py @@ -0,0 +1,207 @@ +#!/usr/bin/python + +# Copyright (c) 2024, Stanislav Shamilov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: android_sdk +short_description: Manages Android SDK packages +description: + - Manages Android SDK packages. + - Allows installation from different channels (stable, beta, dev, canary). + - Allows installation of packages to a non-default SDK root directory. +author: Stanislav Shamilov (@shamilovstas) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +version_added: 10.2.0 +options: + accept_licenses: + description: + - If this is set to V(true), the module attempts to accept license prompts generated by C(sdkmanager) during package + installation. Otherwise, every license prompt is rejected. + type: bool + default: false + name: + description: + - A name of an Android SDK package (for instance, V(build-tools;34.0.0)). + aliases: ['package', 'pkg'] + type: list + elements: str + state: + description: + - Indicates the desired package(s) state. + - V(present) ensures that package(s) is/are present. + - V(absent) ensures that package(s) is/are absent. + - V(latest) ensures that package(s) is/are installed and updated to the latest version(s). + choices: ['present', 'absent', 'latest'] + default: present + type: str + sdk_root: + description: + - Provides path for an alternative directory to install Android SDK packages to. By default, all packages are installed + to the directory where C(sdkmanager) is installed. 
+ type: path + channel: + description: + - Indicates what channel must C(sdkmanager) use for installation of packages. + choices: ['stable', 'beta', 'dev', 'canary'] + default: stable + type: str +requirements: + - C(java) >= 17 + - C(sdkmanager) Command line tool for installing Android SDK packages. +notes: + - For some of the packages installed by C(sdkmanager) is it necessary to accept licenses. Usually it is done through command + line prompt in a form of a Y/N question when a licensed package is requested to be installed. If there are several packages + requested for installation and at least two of them belong to different licenses, the C(sdkmanager) tool prompts for these + licenses in a loop. In order to install packages, the module must be able to answer these license prompts. Currently, + it is only possible to answer one license prompt at a time, meaning that instead of installing multiple packages as a + single invocation of the C(sdkmanager --install) command, it is done by executing the command independently for each package. + This makes sure that at most only one license prompt needs to be answered. At the time of writing this module, a C(sdkmanager)'s + package may belong to at most one license type that needs to be accepted. However, if this changes in the future, the + module may hang as there might be more prompts generated by the C(sdkmanager) tool which the module is unable to answer. + If this becomes the case, file an issue and in the meantime, consider accepting all the licenses in advance, as it is + described in the C(sdkmanager) L(documentation,https://developer.android.com/tools/sdkmanager#accept-licenses), for instance, + using the M(ansible.builtin.command) module. +seealso: + - name: sdkmanager tool documentation + description: Detailed information of how to install and use sdkmanager command line tool. 
+ link: https://developer.android.com/tools/sdkmanager +""" + +EXAMPLES = r""" +- name: Install build-tools;34.0.0 + community.general.android_sdk: + name: build-tools;34.0.0 + accept_licenses: true + state: present + +- name: Install build-tools;34.0.0 and platform-tools + community.general.android_sdk: + name: + - build-tools;34.0.0 + - platform-tools + accept_licenses: true + state: present + +- name: Delete build-tools;34.0.0 + community.general.android_sdk: + name: build-tools;34.0.0 + state: absent + +- name: Install platform-tools or update if installed + community.general.android_sdk: + name: platform-tools + accept_licenses: true + state: latest + +- name: Install build-tools;34.0.0 to a different SDK root + community.general.android_sdk: + name: build-tools;34.0.0 + accept_licenses: true + state: present + sdk_root: "/path/to/new/root" + +- name: Install a package from another channel + community.general.android_sdk: + name: some-package-present-in-canary-channel + accept_licenses: true + state: present + channel: canary +""" + +RETURN = r""" +installed: + description: A list of packages that have been installed. + returned: when packages have changed + type: list + sample: ["build-tools;34.0.0", "platform-tools"] + +removed: + description: A list of packages that have been removed. 
+ returned: when packages have changed + type: list + sample: ["build-tools;34.0.0", "platform-tools"] +""" + +from ansible_collections.community.general.plugins.module_utils.mh.module_helper import StateModuleHelper +from ansible_collections.community.general.plugins.module_utils.android_sdkmanager import Package, AndroidSdkManager + + +class AndroidSdk(StateModuleHelper): + module = dict( + argument_spec=dict( + state=dict(type='str', default='present', choices=['present', 'absent', 'latest']), + package=dict(type='list', elements='str', aliases=['pkg', 'name']), + sdk_root=dict(type='path'), + channel=dict(type='str', default='stable', choices=['stable', 'beta', 'dev', 'canary']), + accept_licenses=dict(type='bool', default=False) + ), + supports_check_mode=True + ) + + def __init_module__(self): + self.sdkmanager = AndroidSdkManager(self.module) + self.vars.set('installed', [], change=True) + self.vars.set('removed', [], change=True) + + def _parse_packages(self): + arg_pkgs = set(self.vars.package) + if len(arg_pkgs) < len(self.vars.package): + self.do_raise("Packages may not repeat") + return set(Package(p) for p in arg_pkgs) + + def state_present(self): + packages = self._parse_packages() + installed = self.sdkmanager.get_installed_packages() + pending_installation = packages.difference(installed) + + self.vars.installed = AndroidSdk._map_packages_to_names(pending_installation) + if not self.check_mode: + rc, stdout, stderr = self.sdkmanager.apply_packages_changes(pending_installation, self.vars.accept_licenses) + if rc != 0: + self.do_raise("Could not install packages: %s" % stderr) + + def state_absent(self): + packages = self._parse_packages() + installed = self.sdkmanager.get_installed_packages() + to_be_deleted = packages.intersection(installed) + self.vars.removed = AndroidSdk._map_packages_to_names(to_be_deleted) + if not self.check_mode: + rc, stdout, stderr = self.sdkmanager.apply_packages_changes(to_be_deleted) + if rc != 0: + self.do_raise("Could 
not uninstall packages: %s" % stderr) + + def state_latest(self): + packages = self._parse_packages() + installed = self.sdkmanager.get_installed_packages() + updatable = self.sdkmanager.get_updatable_packages() + not_installed = packages.difference(installed) + to_be_installed = not_installed.union(updatable) + self.vars.installed = AndroidSdk._map_packages_to_names(to_be_installed) + + if not self.check_mode: + rc, stdout, stderr = self.sdkmanager.apply_packages_changes(to_be_installed, self.vars.accept_licenses) + if rc != 0: + self.do_raise("Could not install packages: %s" % stderr) + + @staticmethod + def _map_packages_to_names(packages): + return [x.name for x in packages] + + +def main(): + AndroidSdk.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ansible_galaxy_install.py b/plugins/modules/ansible_galaxy_install.py index 694591d8cb..919dadcd9a 100644 --- a/plugins/modules/ansible_galaxy_install.py +++ b/plugins/modules/ansible_galaxy_install.py @@ -1,14 +1,12 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2021, Alexei Znamensky # # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = """ +DOCUMENTATION = r""" module: ansible_galaxy_install author: - "Alexei Znamensky (@russoz)" @@ -17,46 +15,62 @@ version_added: 3.5.0 description: - This module allows the installation of Ansible collections or roles using C(ansible-galaxy). notes: - - > - B(Ansible 2.9/2.10): The C(ansible-galaxy) command changed significantly between Ansible 2.9 and - ansible-base 2.10 (later ansible-core 2.11). See comments in the parameters. - - > - The module will try and run using the C(C.UTF-8) locale. - If that fails, it will try C(en_US.UTF-8). - If that one also fails, the module will fail. 
+ - Support for B(Ansible 2.9/2.10) was removed in community.general 8.0.0. + - The module tries to run using the C(C.UTF-8) locale. If that fails, it tries C(en_US.UTF-8). If that one also fails, the + module fails. +seealso: + - name: C(ansible-galaxy) command manual page + description: Manual page for the command. + link: https://docs.ansible.com/ansible/latest/cli/ansible-galaxy.html + requirements: - - Ansible 2.9, ansible-base 2.10, or ansible-core 2.11 or newer + - ansible-core 2.11 or newer +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: + state: + description: + - If O(state=present) then the collection or role is installed. Note that the collections and roles are not updated + with this option. + - Currently the O(state=latest) is ignored unless O(type=collection), and it ensures the collection is installed and + updated to the latest available version. + - Please note that O(force=true) can be used to perform upgrade regardless of O(type). + type: str + choices: [present, latest] + default: present + version_added: 9.1.0 type: description: - The type of installation performed by C(ansible-galaxy). - - If I(type) is C(both), then I(requirements_file) must be passed and it may contain both roles and collections. - - "Note however that the opposite is not true: if using a I(requirements_file), then I(type) can be any of the three choices." - - "B(Ansible 2.9): The option C(both) will have the same effect as C(role)." + - If O(type=both), then O(requirements_file) must be passed and it may contain both roles and collections. + - 'Note however that the opposite is not true: if using a O(requirements_file), then O(type) can be any of the three + choices.' type: str choices: [collection, role, both] required: true name: description: - Name of the collection or role being installed. - - > - Versions can be specified with C(ansible-galaxy) usual formats. 
- For example, the collection C(community.docker:1.6.1) or the role C(ansistrano.deploy,3.8.0). - - I(name) and I(requirements_file) are mutually exclusive. + - Versions can be specified with C(ansible-galaxy) usual formats. For example, the collection V(community.docker:1.6.1) + or the role V(ansistrano.deploy,3.8.0). + - O(name) and O(requirements_file) are mutually exclusive. type: str requirements_file: description: - Path to a file containing a list of requirements to be installed. - - It works for I(type) equals to C(collection) and C(role). - - I(name) and I(requirements_file) are mutually exclusive. - - "B(Ansible 2.9): It can only be used to install either I(type=role) or I(type=collection), but not both at the same run." + - It works for O(type) equals to V(collection) and V(role). + - O(name) and O(requirements_file) are mutually exclusive. type: path dest: description: - - The path to the directory containing your collections or roles, according to the value of I(type). - - > - Please notice that C(ansible-galaxy) will not install collections with I(type=both), when I(requirements_file) - contains both roles and collections and I(dest) is specified. + - The path to the directory containing your collections or roles, according to the value of O(type). + - Please notice that C(ansible-galaxy) does not install collections with O(type=both), when O(requirements_file) contains + both roles and collections and O(dest) is specified. type: path no_deps: description: @@ -66,31 +80,14 @@ options: default: false force: description: - - Force overwriting an existing role or collection. - - Using I(force=true) is mandatory when downgrading. - - "B(Ansible 2.9 and 2.10): Must be C(true) to upgrade roles and collections." - type: bool - default: false - ack_ansible29: - description: - - Acknowledge using Ansible 2.9 with its limitations, and prevents the module from generating warnings about them. 
- - This option is completely ignored if using a version of Ansible greater than C(2.9.x). - - Note that this option will be removed without any further deprecation warning once support - for Ansible 2.9 is removed from this module. - type: bool - default: false - ack_min_ansiblecore211: - description: - - Acknowledge the module is deprecating support for Ansible 2.9 and ansible-base 2.10. - - Support for those versions will be removed in community.general 8.0.0. - At the same time, this option will be removed without any deprecation warning! - - This option is completely ignored if using a version of ansible-core/ansible-base/Ansible greater than C(2.11). - - For the sake of conciseness, setting this parameter to C(true) implies I(ack_ansible29=true). + - Force overwriting existing roles and/or collections. + - It can be used for upgrading, but the module output always reports C(changed=true). + - Using O(force=true) is mandatory when downgrading. type: bool default: false """ -EXAMPLES = """ +EXAMPLES = r""" - name: Install collection community.network community.general.ansible_galaxy_install: type: collection @@ -112,83 +109,86 @@ EXAMPLES = """ type: collection name: community.network:3.0.2 force: true - """ -RETURN = """ - type: - description: The value of the I(type) parameter. - type: str - returned: always - name: - description: The value of the I(name) parameter. - type: str - returned: always - dest: - description: The value of the I(dest) parameter. - type: str - returned: always - requirements_file: - description: The value of the I(requirements_file) parameter. - type: str - returned: always - force: - description: The value of the I(force) parameter. - type: bool - returned: always - installed_roles: - description: - - If I(requirements_file) is specified instead, returns dictionary with all the roles installed per path. - - If I(name) is specified, returns that role name and the version installed per path. 
- - "B(Ansible 2.9): Returns empty because C(ansible-galaxy) has no C(list) subcommand." - type: dict - returned: always when installing roles - contains: - "": - description: Roles and versions for that path. - type: dict - sample: - /home/user42/.ansible/roles: - ansistrano.deploy: 3.9.0 - baztian.xfce: v0.0.3 - /custom/ansible/roles: - ansistrano.deploy: 3.8.0 - installed_collections: - description: - - If I(requirements_file) is specified instead, returns dictionary with all the collections installed per path. - - If I(name) is specified, returns that collection name and the version installed per path. - - "B(Ansible 2.9): Returns empty because C(ansible-galaxy) has no C(list) subcommand." - type: dict - returned: always when installing collections - contains: - "": - description: Collections and versions for that path - type: dict - sample: - /home/az/.ansible/collections/ansible_collections: - community.docker: 1.6.0 - community.general: 3.0.2 - /custom/ansible/ansible_collections: - community.general: 3.1.0 - new_collections: - description: New collections installed by this module. - returned: success - type: dict - sample: - community.general: 3.1.0 - community.docker: 1.6.1 - new_roles: - description: New roles installed by this module. - returned: success - type: dict - sample: - ansistrano.deploy: 3.8.0 +RETURN = r""" +type: + description: The value of the O(type) parameter. + type: str + returned: always +name: + description: The value of the O(name) parameter. + type: str + returned: always +dest: + description: The value of the O(dest) parameter. + type: str + returned: always +requirements_file: + description: The value of the O(requirements_file) parameter. + type: str + returned: always +force: + description: The value of the O(force) parameter. + type: bool + returned: always +installed_roles: + description: + - If O(requirements_file) is specified instead, returns dictionary with all the roles installed per path. 
+ - If O(name) is specified, returns that role name and the version installed per path. + type: dict + returned: always when installing roles + contains: + "": + description: Roles and versions for that path. + type: dict + sample: + /home/user42/.ansible/roles: + ansistrano.deploy: 3.9.0 baztian.xfce: v0.0.3 + /custom/ansible/roles: + ansistrano.deploy: 3.8.0 +installed_collections: + description: + - If O(requirements_file) is specified instead, returns dictionary with all the collections installed per path. + - If O(name) is specified, returns that collection name and the version installed per path. + type: dict + returned: always when installing collections + contains: + "": + description: Collections and versions for that path. + type: dict + sample: + /home/az/.ansible/collections/ansible_collections: + community.docker: 1.6.0 + community.general: 3.0.2 + /custom/ansible/ansible_collections: + community.general: 3.1.0 +new_collections: + description: New collections installed by this module. + returned: success + type: dict + sample: + community.general: 3.1.0 + community.docker: 1.6.1 +new_roles: + description: New roles installed by this module. + returned: success + type: dict + sample: + ansistrano.deploy: 3.8.0 + baztian.xfce: v0.0.3 +version: + description: Version of ansible-core for ansible-galaxy. 
+ type: str + returned: always + sample: 2.17.4 + version_added: 10.0.0 """ import re -from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt as fmt +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper, ModuleHelperException @@ -197,21 +197,21 @@ class AnsibleGalaxyInstall(ModuleHelper): _RE_LIST_PATH = re.compile(r'^# (?P.*)$') _RE_LIST_COLL = re.compile(r'^(?P\w+\.\w+)\s+(?P[\d\.]+)\s*$') _RE_LIST_ROLE = re.compile(r'^- (?P\w+\.\w+),\s+(?P[\d\.]+)\s*$') - _RE_INSTALL_OUTPUT = None # Set after determining ansible version, see __init_module__() + _RE_INSTALL_OUTPUT = re.compile( + r'^(?:(?P\w+\.\w+):(?P[\d\.]+)|- (?P\w+\.\w+) \((?P[\d\.]+)\)) was installed successfully$' + ) ansible_version = None - is_ansible29 = None output_params = ('type', 'name', 'dest', 'requirements_file', 'force', 'no_deps') module = dict( argument_spec=dict( + state=dict(type='str', choices=['present', 'latest'], default='present'), type=dict(type='str', choices=('collection', 'role', 'both'), required=True), name=dict(type='str'), requirements_file=dict(type='path'), dest=dict(type='path'), force=dict(type='bool', default=False), no_deps=dict(type='bool', default=False), - ack_ansible29=dict(type='bool', default=False), - ack_min_ansiblecore211=dict(type='bool', default=False), ), mutually_exclusive=[('name', 'requirements_file')], required_one_of=[('name', 'requirements_file')], @@ -221,14 +221,15 @@ class AnsibleGalaxyInstall(ModuleHelper): command = 'ansible-galaxy' command_args_formats = dict( - type=fmt.as_func(lambda v: [] if v == 'both' else [v]), - galaxy_cmd=fmt.as_list(), - requirements_file=fmt.as_opt_val('-r'), - dest=fmt.as_opt_val('-p'), - force=fmt.as_bool("--force"), - no_deps=fmt.as_bool("--no-deps"), - version=fmt.as_bool("--version"), - name=fmt.as_list(), + 
type=cmd_runner_fmt.as_func(lambda v: [] if v == 'both' else [v]), + galaxy_cmd=cmd_runner_fmt.as_list(), + upgrade=cmd_runner_fmt.as_bool("--upgrade"), + requirements_file=cmd_runner_fmt.as_opt_val('-r'), + dest=cmd_runner_fmt.as_opt_val('-p'), + force=cmd_runner_fmt.as_bool("--force"), + no_deps=cmd_runner_fmt.as_bool("--no-deps"), + version=cmd_runner_fmt.as_fixed("--version"), + name=cmd_runner_fmt.as_list(), ) def _make_runner(self, lang): @@ -246,41 +247,28 @@ class AnsibleGalaxyInstall(ModuleHelper): if not match: self.do_raise("Unable to determine ansible-galaxy version from: {0}".format(line)) version = match.group("version") - version = tuple(int(x) for x in version.split('.')[:3]) return version try: runner = self._make_runner("C.UTF-8") with runner("version", check_rc=False, output_process=process) as ctx: - return runner, ctx.run(version=True) - except UnsupportedLocale as e: + return runner, ctx.run() + except UnsupportedLocale: runner = self._make_runner("en_US.UTF-8") with runner("version", check_rc=True, output_process=process) as ctx: - return runner, ctx.run(version=True) + return runner, ctx.run() def __init_module__(self): - # self.runner = CmdRunner(self.module, command=self.command, arg_formats=self.command_args_formats, force_lang=self.force_lang) - self.runner, self.ansible_version = self._get_ansible_galaxy_version() - if self.ansible_version < (2, 11) and not self.vars.ack_min_ansiblecore211: - self.module.deprecate( - "Support for Ansible 2.9 and ansible-base 2.10 is being deprecated. " - "At the same time support for them is ended, also the ack_ansible29 option will be removed. 
" - "Upgrading is strongly recommended, or set 'ack_min_ansiblecore211' to suppress this message.", - version="8.0.0", - collection_name="community.general", - ) - self.is_ansible29 = self.ansible_version < (2, 10) - if self.is_ansible29: - self._RE_INSTALL_OUTPUT = re.compile(r"^(?:.*Installing '(?P\w+\.\w+):(?P[\d\.]+)'.*" - r'|- (?P\w+\.\w+) \((?P[\d\.]+)\)' - r' was installed successfully)$') - else: - # Collection install output changed: - # ansible-base 2.10: "coll.name (x.y.z)" - # ansible-core 2.11+: "coll.name:x.y.z" - self._RE_INSTALL_OUTPUT = re.compile(r'^(?:(?P\w+\.\w+)(?: \(|:)(?P[\d\.]+)\)?' - r'|- (?P\w+\.\w+) \((?P[\d\.]+)\))' - r' was installed successfully$') + self.runner, self.vars.version = self._get_ansible_galaxy_version() + self.ansible_version = tuple(int(x) for x in self.vars.version.split('.')[:3]) + if self.ansible_version < (2, 11): + self.module.fail_json(msg="Support for Ansible 2.9 and ansible-base 2.10 has been removed.") + self.vars.set("new_collections", {}, change=True) + self.vars.set("new_roles", {}, change=True) + if self.vars.type != "collection": + self.vars.installed_roles = self._list_roles() + if self.vars.type != "roles": + self.vars.installed_collections = self._list_collections() def _list_element(self, _type, path_re, elem_re): def process(rc, out, err): @@ -315,24 +303,8 @@ class AnsibleGalaxyInstall(ModuleHelper): def _list_roles(self): return self._list_element('role', self._RE_LIST_PATH, self._RE_LIST_ROLE) - def _setup29(self): - self.vars.set("new_collections", {}) - self.vars.set("new_roles", {}) - self.vars.set("ansible29_change", False, change=True, output=False) - if not (self.vars.ack_ansible29 or self.vars.ack_min_ansiblecore211): - self.warn("Ansible 2.9 or older: unable to retrieve lists of roles and collections already installed") - if self.vars.requirements_file is not None and self.vars.type == 'both': - self.warn("Ansible 2.9 or older: will install only roles from requirement files") - - def 
_setup210plus(self): - self.vars.set("new_collections", {}, change=True) - self.vars.set("new_roles", {}, change=True) - if self.vars.type != "collection": - self.vars.installed_roles = self._list_roles() - if self.vars.type != "roles": - self.vars.installed_collections = self._list_collections() - def __run__(self): + def process(rc, out, err): for line in out.splitlines(): match = self._RE_INSTALL_OUTPUT.match(line) @@ -340,21 +312,12 @@ class AnsibleGalaxyInstall(ModuleHelper): continue if match.group("collection"): self.vars.new_collections[match.group("collection")] = match.group("cversion") - if self.is_ansible29: - self.vars.ansible29_change = True elif match.group("role"): self.vars.new_roles[match.group("role")] = match.group("rversion") - if self.is_ansible29: - self.vars.ansible29_change = True - if self.is_ansible29: - if self.vars.type == 'both': - raise ValueError("Type 'both' not supported in Ansible 2.9") - self._setup29() - else: - self._setup210plus() - with self.runner("type galaxy_cmd force no_deps dest requirements_file name", output_process=process) as ctx: - ctx.run(galaxy_cmd="install") + upgrade = (self.vars.type == "collection" and self.vars.state == "latest") + with self.runner("type galaxy_cmd upgrade force no_deps dest requirements_file name", output_process=process) as ctx: + ctx.run(galaxy_cmd="install", upgrade=upgrade) if self.verbosity > 2: self.vars.set("run_info", ctx.run_info) diff --git a/plugins/modules/apache2_mod_proxy.py b/plugins/modules/apache2_mod_proxy.py index 70ab5a42ed..3b06736898 100644 --- a/plugins/modules/apache2_mod_proxy.py +++ b/plugins/modules/apache2_mod_proxy.py @@ -1,51 +1,55 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2016, Olivier Boukili # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from 
__future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: apache2_mod_proxy author: Olivier Boukili (@oboukili) short_description: Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer pool description: - - Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer - pool, using HTTP POST and GET requests. The httpd mod_proxy balancer-member - status page has to be enabled and accessible, as this module relies on parsing - this page. This module supports ansible check_mode, and requires BeautifulSoup - python module. + - Set and/or get members' attributes of an Apache httpd 2.4 mod_proxy balancer pool, using HTTP POST and GET requests. The + httpd mod_proxy balancer-member status page has to be enabled and accessible, as this module relies on parsing this page. +extends_documentation_fragment: + - community.general.attributes +requirements: + - Python package C(beautifulsoup4) +attributes: + check_mode: + support: full + diff_mode: + support: none options: balancer_url_suffix: type: str description: - - Suffix of the balancer pool url required to access the balancer pool - status page (e.g. balancer_vhost[:port]/balancer_url_suffix). + - Suffix of the balancer pool URL required to access the balancer pool status page (for example V(balancer_vhost[:port]/balancer_url_suffix)). default: /balancer-manager/ balancer_vhost: type: str description: - - (ipv4|ipv6|fqdn):port of the Apache httpd 2.4 mod_proxy balancer pool. + - (IPv4|IPv6|FQDN):port of the Apache httpd 2.4 mod_proxy balancer pool. required: true member_host: type: str description: - - (ipv4|ipv6|fqdn) of the balancer member to get or to set attributes to. - Port number is autodetected and should not be specified here. - If undefined, apache2_mod_proxy module will return a members list of - dictionaries of all the current balancer pool members' attributes. + - (IPv4|IPv6|FQDN) of the balancer member to get or to set attributes to. 
Port number is autodetected and should not + be specified here. + - If undefined, the M(community.general.apache2_mod_proxy) module returns a members list of dictionaries of all the + current balancer pool members' attributes. state: - type: str + type: list + elements: str + choices: [present, absent, enabled, disabled, drained, hot_standby, ignore_errors] description: - Desired state of the member host. - (absent|disabled),drained,hot_standby,ignore_errors can be - simultaneously invoked by separating them with a comma (e.g. state=drained,ignore_errors). - - 'Accepted state values: ["present", "absent", "enabled", "disabled", "drained", "hot_standby", "ignore_errors"]' + - States can be simultaneously invoked by separating them with a comma (for example V(state=drained,ignore_errors)), + but it is recommended to specify them as a proper YAML list. + - States V(present) and V(absent) must be used without any other state. tls: description: - Use https to access balancer management page. @@ -56,9 +60,9 @@ options: - Validate ssl/tls certificates. type: bool default: true -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get all current balancer pool members attributes community.general.apache2_mod_proxy: balancer_vhost: 10.0.0.2 @@ -103,57 +107,61 @@ EXAMPLES = ''' member_host: '{{ member.host }}' state: absent delegate_to: myloadbalancernode -''' +""" -RETURN = ''' +RETURN = r""" member: - description: specific balancer member information dictionary, returned when apache2_mod_proxy module is invoked with member_host parameter. 
- type: dict - returned: success - sample: - {"attributes": - {"Busy": "0", - "Elected": "42", - "Factor": "1", - "From": "136K", - "Load": "0", - "Route": null, - "RouteRedir": null, - "Set": "0", - "Status": "Init Ok ", - "To": " 47K", - "Worker URL": null - }, - "balancer_url": "http://10.10.0.2/balancer-manager/", - "host": "10.10.0.20", - "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b", - "path": "/ws", - "port": 8080, - "protocol": "http", - "status": { - "disabled": false, - "drained": false, - "hot_standby": false, - "ignore_errors": false - } + description: Specific balancer member information dictionary, returned when the module is invoked with O(member_host) parameter. + type: dict + returned: success + sample: + { + "attributes": { + "Busy": "0", + "Elected": "42", + "Factor": "1", + "From": "136K", + "Load": "0", + "Route": null, + "RouteRedir": null, + "Set": "0", + "Status": "Init Ok ", + "To": " 47K", + "Worker URL": null + }, + "balancer_url": "http://10.10.0.2/balancer-manager/", + "host": "10.10.0.20", + "management_url": "http://10.10.0.2/lb/?b=mywsbalancer&w=http://10.10.0.20:8080/ws&nonce=8925436c-79c6-4841-8936-e7d13b79239b", + "path": "/ws", + "port": 8080, + "protocol": "http", + "status": { + "disabled": false, + "drained": false, + "hot_standby": false, + "ignore_errors": false } + } members: - description: list of member (defined above) dictionaries, returned when apache2_mod_proxy is invoked with no member_host and state args. - returned: success - type: list - sample: - [{"attributes": { - "Busy": "0", - "Elected": "42", - "Factor": "1", - "From": "136K", - "Load": "0", - "Route": null, - "RouteRedir": null, - "Set": "0", - "Status": "Init Ok ", - "To": " 47K", - "Worker URL": null + description: List of member (defined above) dictionaries, returned when the module is invoked with no O(member_host) and + O(state) args. 
+ returned: success + type: list + sample: + [ + { + "attributes": { + "Busy": "0", + "Elected": "42", + "Factor": "1", + "From": "136K", + "Load": "0", + "Route": null, + "RouteRedir": null, + "Set": "0", + "Status": "Init Ok ", + "To": " 47K", + "Worker URL": null }, "balancer_url": "http://10.10.0.2/balancer-manager/", "host": "10.10.0.20", @@ -162,24 +170,25 @@ members: "port": 8080, "protocol": "http", "status": { - "disabled": false, - "drained": false, - "hot_standby": false, - "ignore_errors": false + "disabled": false, + "drained": false, + "hot_standby": false, + "ignore_errors": false } - }, - {"attributes": { - "Busy": "0", - "Elected": "42", - "Factor": "1", - "From": "136K", - "Load": "0", - "Route": null, - "RouteRedir": null, - "Set": "0", - "Status": "Init Ok ", - "To": " 47K", - "Worker URL": null + }, + { + "attributes": { + "Busy": "0", + "Elected": "42", + "Factor": "1", + "From": "136K", + "Load": "0", + "Route": null, + "RouteRedir": null, + "Set": "0", + "Status": "Init Ok ", + "To": " 47K", + "Worker URL": null }, "balancer_url": "http://10.10.0.2/balancer-manager/", "host": "10.10.0.21", @@ -188,42 +197,42 @@ members: "port": 8080, "protocol": "http", "status": { - "disabled": false, - "drained": false, - "hot_standby": false, - "ignore_errors": false} + "disabled": false, + "drained": false, + "hot_standby": false, + "ignore_errors": false } - ] -''' + } + ] +""" import re -import traceback -from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible_collections.community.general.plugins.module_utils import deps +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper, ModuleHelperException + +from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.urls import fetch_url -from ansible.module_utils.six import iteritems -BEAUTIFUL_SOUP_IMP_ERR = None -try: - from BeautifulSoup import BeautifulSoup -except ImportError: - 
BEAUTIFUL_SOUP_IMP_ERR = traceback.format_exc() - HAS_BEAUTIFULSOUP = False -else: - HAS_BEAUTIFULSOUP = True +with deps.declare("beautifulsoup4"): + from bs4 import BeautifulSoup # balancer member attributes extraction regexp: -EXPRESSION = r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)" +EXPRESSION = re.compile(to_text(r"(b=([\w\.\-]+)&w=(https?|ajp|wss?|ftp|[sf]cgi)://([\w\.\-]+):?(\d*)([/\w\.\-]*)&?[\w\-\=]*)")) # Apache2 server version extraction regexp: -APACHE_VERSION_EXPRESSION = r"SERVER VERSION: APACHE/([\d.]+)" +APACHE_VERSION_EXPRESSION = re.compile(to_text(r"SERVER VERSION: APACHE/([\d.]+)")) + + +def find_all(where, what): + return where.find_all(what) def regexp_extraction(string, _regexp, groups=1): """ Returns the capture group (default=1) specified in the regexp, applied to the string """ - regexp_search = re.search(string=str(string), pattern=str(_regexp)) + regexp_search = _regexp.search(string) if regexp_search: if regexp_search.group(groups) != '': - return str(regexp_search.group(groups)) + return regexp_search.group(groups) return None @@ -244,33 +253,33 @@ class BalancerMember(object): """ def __init__(self, management_url, balancer_url, module): - self.host = regexp_extraction(management_url, str(EXPRESSION), 4) - self.management_url = str(management_url) + self.host = regexp_extraction(management_url, EXPRESSION, 4) + self.management_url = management_url self.protocol = regexp_extraction(management_url, EXPRESSION, 3) self.port = regexp_extraction(management_url, EXPRESSION, 5) self.path = regexp_extraction(management_url, EXPRESSION, 6) - self.balancer_url = str(balancer_url) + self.balancer_url = balancer_url self.module = module def get_member_attributes(self): """ Returns a dictionary of a balancer member's attributes.""" - balancer_member_page = fetch_url(self.module, self.management_url) + resp, info = fetch_url(self.module, self.management_url, headers={'Referer': self.management_url}) 
- if balancer_member_page[1]['status'] != 200: - self.module.fail_json(msg="Could not get balancer_member_page, check for connectivity! " + balancer_member_page[1]) - else: - try: - soup = BeautifulSoup(balancer_member_page[0]) - except TypeError as exc: - self.module.fail_json(msg="Cannot parse balancer_member_page HTML! " + str(exc)) - else: - subsoup = soup.findAll('table')[1].findAll('tr') - keys = subsoup[0].findAll('th') - for valuesset in subsoup[1::1]: - if re.search(pattern=self.host, string=str(valuesset)): - values = valuesset.findAll('td') - return dict((keys[x].string, values[x].string) for x in range(0, len(keys))) + if info['status'] != 200: + raise ModuleHelperException("Could not get balancer_member_page, check for connectivity! {0}".format(info)) + + try: + soup = BeautifulSoup(resp) + except TypeError as exc: + raise ModuleHelperException("Cannot parse balancer_member_page HTML! {0}".format(exc)) from exc + + subsoup = find_all(find_all(soup, 'table')[1], 'tr') + keys = find_all(subsoup[0], 'th') + for valuesset in subsoup[1::1]: + if re.search(pattern=self.host, string=str(valuesset)): + values = find_all(valuesset, 'td') + return {keys[x].string: values[x].string for x in range(0, len(keys))} def get_member_status(self): """ Returns a dictionary of a balancer member's status attributes.""" @@ -278,8 +287,8 @@ class BalancerMember(object): 'drained': 'Drn', 'hot_standby': 'Stby', 'ignore_errors': 'Ign'} - actual_status = str(self.attributes['Status']) - status = dict((mode, patt in actual_status) for mode, patt in iteritems(status_mapping)) + actual_status = self.attributes['Status'] + status = {mode: patt in actual_status for mode, patt in status_mapping.items()} return status def set_member_status(self, values): @@ -290,155 +299,125 @@ class BalancerMember(object): 'ignore_errors': '&w_status_I'} request_body = regexp_extraction(self.management_url, EXPRESSION, 1) - values_url = "".join("{0}={1}".format(url_param, 1 if values[mode] else 0) for 
mode, url_param in iteritems(values_mapping)) + values_url = "".join("{0}={1}".format(url_param, 1 if values[mode] else 0) for mode, url_param in values_mapping.items()) request_body = "{0}{1}".format(request_body, values_url) - response = fetch_url(self.module, self.management_url, data=request_body) - if response[1]['status'] != 200: - self.module.fail_json(msg="Could not set the member status! " + self.host + " " + response[1]['status']) + response, info = fetch_url(self.module, self.management_url, data=request_body, headers={'Referer': self.management_url}) + if info['status'] != 200: + raise ModuleHelperException("Could not set the member status! {0} {1}".format(self.host, info['status'])) attributes = property(get_member_attributes) status = property(get_member_status, set_member_status) + def as_dict(self): + return { + "host": self.host, + "status": self.status, + "protocol": self.protocol, + "port": self.port, + "path": self.path, + "attributes": self.attributes, + "management_url": self.management_url, + "balancer_url": self.balancer_url + } + class Balancer(object): """ Apache httpd 2.4 mod_proxy balancer object""" - def __init__(self, host, suffix, module, members=None, tls=False): - if tls: - self.base_url = 'https://' + str(host) - self.url = 'https://' + str(host) + str(suffix) - else: - self.base_url = 'http://' + str(host) - self.url = 'http://' + str(host) + str(suffix) + def __init__(self, module, host, suffix, tls=False): + proto = "https" if tls else "http" + self.base_url = '{0}://{1}'.format(proto, host) + self.url = '{0}://{1}{2}'.format(proto, host, suffix) self.module = module self.page = self.fetch_balancer_page() - if members is None: - self._members = [] def fetch_balancer_page(self): """ Returns the balancer management html page as a string for later parsing.""" - page = fetch_url(self.module, str(self.url)) - if page[1]['status'] != 200: - self.module.fail_json(msg="Could not get balancer page! 
HTTP status response: " + str(page[1]['status'])) - else: - content = page[0].read() - apache_version = regexp_extraction(content.upper(), APACHE_VERSION_EXPRESSION, 1) - if apache_version: - if not re.search(pattern=r"2\.4\.[\d]*", string=apache_version): - self.module.fail_json(msg="This module only acts on an Apache2 2.4+ instance, current Apache2 version: " + str(apache_version)) - return content - else: - self.module.fail_json(msg="Could not get the Apache server version from the balancer-manager") + resp, info = fetch_url(self.module, self.url) + if info['status'] != 200: + raise ModuleHelperException("Could not get balancer page! HTTP status response: {0}".format(info['status'])) + + content = to_text(resp.read()) + apache_version = regexp_extraction(content.upper(), APACHE_VERSION_EXPRESSION, 1) + if not apache_version: + raise ModuleHelperException("Could not get the Apache server version from the balancer-manager") + + if not re.search(pattern=r"2\.4\.[\d]*", string=apache_version): + raise ModuleHelperException("This module only acts on an Apache2 2.4+ instance, current Apache2 version: {0}".format(apache_version)) + return content def get_balancer_members(self): """ Returns members of the balancer as a generator object for later iteration.""" try: soup = BeautifulSoup(self.page) - except TypeError: - self.module.fail_json(msg="Cannot parse balancer page HTML! " + str(self.page)) - else: - for element in soup.findAll('a')[1::1]: - balancer_member_suffix = str(element.get('href')) - if not balancer_member_suffix: - self.module.fail_json(msg="Argument 'balancer_member_suffix' is empty!") - else: - yield BalancerMember(str(self.base_url + balancer_member_suffix), str(self.url), self.module) + except TypeError as e: + raise ModuleHelperException("Cannot parse balancer page HTML! 
{0}".format(self.page)) from e + + elements = find_all(soup, 'a') + for element in elements[1::1]: + balancer_member_suffix = element.get('href') + if not balancer_member_suffix: + raise ModuleHelperException("Argument 'balancer_member_suffix' is empty!") + + yield BalancerMember(self.base_url + balancer_member_suffix, self.url, self.module) members = property(get_balancer_members) -def main(): +class ApacheModProxy(ModuleHelper): """ Initiates module.""" - module = AnsibleModule( + module = dict( argument_spec=dict( balancer_vhost=dict(required=True, type='str'), balancer_url_suffix=dict(default="/balancer-manager/", type='str'), member_host=dict(type='str'), - state=dict(type='str'), + state=dict(type='list', elements='str', choices=['present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors']), tls=dict(default=False, type='bool'), validate_certs=dict(default=True, type='bool') ), supports_check_mode=True ) - if HAS_BEAUTIFULSOUP is False: - module.fail_json(msg=missing_required_lib('BeautifulSoup'), exception=BEAUTIFUL_SOUP_IMP_ERR) + def __init_module__(self): + deps.validate(self.module) - if module.params['state'] is not None: - states = module.params['state'].split(',') - if (len(states) > 1) and (("present" in states) or ("enabled" in states)): - module.fail_json(msg="state present/enabled is mutually exclusive with other states!") + if len(self.vars.state or []) > 1 and ("present" in self.vars.state or "enabled" in self.vars.state): + self.do_raise(msg="states present/enabled are mutually exclusive with other states!") + + self.mybalancer = Balancer(self.module, self.vars.balancer_vhost, self.vars.balancer_url_suffix, tls=self.vars.tls) + + def __run__(self): + if self.vars.member_host is None: + self.vars.members = [member.as_dict() for member in self.mybalancer.members] else: - for _state in states: - if _state not in ['present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors']: - module.fail_json( - 
msg="State can only take values amongst 'present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors'." - ) - else: - states = ['None'] + member_exists = False + member_status = {'disabled': False, 'drained': False, 'hot_standby': False, 'ignore_errors': False} + for mode in member_status: + for state in self.vars.state or []: + if mode == state: + member_status[mode] = True + elif mode == 'disabled' and state == 'absent': + member_status[mode] = True - mybalancer = Balancer(module.params['balancer_vhost'], - module.params['balancer_url_suffix'], - module=module, - tls=module.params['tls']) + for member in self.mybalancer.members: + if str(member.host) == self.vars.member_host: + member_exists = True + if self.vars.state is not None: + member_status_before = member.status + if not self.check_mode: + member_status_after = member.status = member_status + else: + member_status_after = member_status + self.changed |= (member_status_before != member_status_after) + self.vars.member = member.as_dict() - if module.params['member_host'] is None: - json_output_list = [] - for member in mybalancer.members: - json_output_list.append({ - "host": member.host, - "status": member.status, - "protocol": member.protocol, - "port": member.port, - "path": member.path, - "attributes": member.attributes, - "management_url": member.management_url, - "balancer_url": member.balancer_url - }) - module.exit_json( - changed=False, - members=json_output_list - ) - else: - changed = False - member_exists = False - member_status = {'disabled': False, 'drained': False, 'hot_standby': False, 'ignore_errors': False} - for mode in member_status.keys(): - for state in states: - if mode == state: - member_status[mode] = True - elif mode == 'disabled' and state == 'absent': - member_status[mode] = True + if not member_exists: + self.do_raise(msg='{0} is not a member of the balancer {1}!'.format(self.vars.member_host, self.vars.balancer_vhost)) - for member in mybalancer.members: 
- if str(member.host) == str(module.params['member_host']): - member_exists = True - if module.params['state'] is not None: - member_status_before = member.status - if not module.check_mode: - member_status_after = member.status = member_status - else: - member_status_after = member_status - if member_status_before != member_status_after: - changed = True - json_output = { - "host": member.host, - "status": member.status, - "protocol": member.protocol, - "port": member.port, - "path": member.path, - "attributes": member.attributes, - "management_url": member.management_url, - "balancer_url": member.balancer_url - } - if member_exists: - module.exit_json( - changed=changed, - member=json_output - ) - else: - module.fail_json(msg=str(module.params['member_host']) + ' is not a member of the balancer ' + str(module.params['balancer_vhost']) + '!') + +def main(): + ApacheModProxy.execute() if __name__ == '__main__': diff --git a/plugins/modules/apache2_module.py b/plugins/modules/apache2_module.py index e6998ad3f5..2421708262 100644 --- a/plugins/modules/apache2_module.py +++ b/plugins/modules/apache2_module.py @@ -1,67 +1,70 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2013-2014, Christian Berendt # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: apache2_module author: - - Christian Berendt (@berendt) - - Ralf Hertel (@n0trax) - - Robin Roth (@robinro) + - Christian Berendt (@berendt) + - Ralf Hertel (@n0trax) + - Robin Roth (@robinro) short_description: Enables/disables a module of the Apache2 webserver description: - - Enables or disables a specified module of the Apache2 webserver. + - Enables or disables a specified module of the Apache2 webserver. 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - type: str - description: - - Name of the module to enable/disable as given to C(a2enmod/a2dismod). - required: true - identifier: - type: str - description: - - Identifier of the module as listed by C(apache2ctl -M). - This is optional and usually determined automatically by the common convention of - appending C(_module) to I(name) as well as custom exception for popular modules. - required: false - force: - description: - - Force disabling of default modules and override Debian warnings. - required: false - type: bool - default: false - state: - type: str - description: - - Desired state of the module. - choices: ['present', 'absent'] - default: present - ignore_configcheck: - description: - - Ignore configuration checks about inconsistent module configuration. Especially for mpm_* modules. - type: bool - default: false - warn_mpm_absent: - description: - - Control the behavior of the warning process for MPM modules. - type: bool - default: true - version_added: 6.3.0 -requirements: ["a2enmod","a2dismod"] + name: + type: str + description: + - Name of the module to enable/disable as given to C(a2enmod)/C(a2dismod). + required: true + identifier: + type: str + description: + - Identifier of the module as listed by C(apache2ctl -M). This is optional and usually determined automatically by the + common convention of appending V(_module) to O(name) as well as custom exception for popular modules. + required: false + force: + description: + - Force disabling of default modules and override Debian warnings. + required: false + type: bool + default: false + state: + type: str + description: + - Desired state of the module. + choices: ['present', 'absent'] + default: present + ignore_configcheck: + description: + - Ignore configuration checks about inconsistent module configuration. Especially for mpm_* modules. 
+      type: bool +      default: false +    warn_mpm_absent: +      description: +        - Control the behavior of the warning process for MPM modules. +      type: bool +      default: true +      version_added: 6.3.0 +requirements: ["a2enmod", "a2dismod"] notes: -   - This does not work on RedHat-based distributions. It does work on Debian- and SuSE-based distributions. -     Whether it works on others depend on whether the C(a2enmod) and C(a2dismod) tools are available or not. -''' +  - This does not work on RedHat-based distributions. It does work on Debian- and SuSE-based distributions. Whether it works +    on others depends on whether the C(a2enmod) and C(a2dismod) tools are available or not. +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Enable the Apache2 module wsgi   community.general.apache2_module:     state: present @@ -91,40 +94,24 @@ EXAMPLES = '''     warn_mpm_absent: false     ignore_configcheck: true   loop: -  - module: mpm_event -    state: absent -  - module: mpm_prefork -    state: present +    - module: mpm_event +      state: absent +    - module: mpm_prefork +      state: present  - name: Enable dump_io module, which is identified as dumpio_module inside apache2   community.general.apache2_module:     state: present     name: dump_io     identifier: dumpio_module -''' +""" -RETURN = ''' +RETURN = r""" result: -    description: message about action taken -    returned: always -    type: str -warnings: -    description: list of warning messages -    returned: when needed -    type: list -rc: -    description: return code of underlying command -    returned: failed -    type: int -stdout: -    description: stdout of underlying command -    returned: failed -    type: str -stderr: -    description: stderr of underlying command -    returned: failed -    type: str -''' +  description: Message about action taken. +  returned: always +  type: str +"""  import re  @@ -147,7 +134,7 @@ def _get_ctl_binary(module):     if ctl_binary is not None:         return ctl_binary -    module.fail_json(msg="Neither of apache2ctl nor apachctl found. 
At least one apache control binary is necessary.") + module.fail_json(msg="Neither of apache2ctl nor apachectl found. At least one apache control binary is necessary.") def _module_is_enabled(module): @@ -159,12 +146,12 @@ def _module_is_enabled(module): if module.params['ignore_configcheck']: if 'AH00534' in stderr and 'mpm_' in module.params['name']: if module.params['warn_mpm_absent']: - module.warnings.append( + module.warn( "No MPM module loaded! apache2 reload AND other module actions" " will fail if no MPM module is loaded immediately." ) else: - module.warnings.append(error_msg) + module.warn(error_msg) return False else: module.fail_json(msg=error_msg) @@ -189,6 +176,7 @@ def create_apache_identifier(name): # re expressions to extract subparts of names re_workarounds = [ + ('php8', re.compile(r'^(php)[\d\.]+')), ('php', re.compile(r'^(php\d)\.')), ] @@ -218,9 +206,7 @@ def _set_state(module, state): if _module_is_enabled(module) != want_enabled: if module.check_mode: - module.exit_json(changed=True, - result=success_msg, - warnings=module.warnings) + module.exit_json(changed=True, result=success_msg) a2mod_binary_path = module.get_bin_path(a2mod_binary) if a2mod_binary_path is None: @@ -235,9 +221,7 @@ def _set_state(module, state): result, stdout, stderr = module.run_command(a2mod_binary_cmd + [name]) if _module_is_enabled(module) == want_enabled: - module.exit_json(changed=True, - result=success_msg, - warnings=module.warnings) + module.exit_json(changed=True, result=success_msg) else: msg = ( 'Failed to set module {name} to {state}:\n' @@ -255,9 +239,7 @@ def _set_state(module, state): stdout=stdout, stderr=stderr) else: - module.exit_json(changed=False, - result=success_msg, - warnings=module.warnings) + module.exit_json(changed=False, result=success_msg) def main(): @@ -273,11 +255,9 @@ def main(): supports_check_mode=True, ) - module.warnings = [] - name = module.params['name'] - if name == 'cgi' and _run_threaded(module): - 
module.fail_json(msg="Your MPM seems to be threaded. No automatic actions on module cgi possible.") + if name == 'cgi' and module.params['state'] == 'present' and _run_threaded(module): + module.fail_json(msg="Your MPM seems to be threaded, therefore enabling cgi module is not allowed.") if not module.params['identifier']: module.params['identifier'] = create_apache_identifier(module.params['name']) diff --git a/plugins/modules/apk.py b/plugins/modules/apk.py index 831ab60749..d16635e3b4 100644 --- a/plugins/modules/apk.py +++ b/plugins/modules/apk.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2015, Kevin Brebanov # Based on pacman (Afterburn , Aaron Bull Schaefer ) @@ -8,27 +7,34 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: apk short_description: Manages apk packages description: - - Manages I(apk) packages for Alpine Linux. + - Manages C(apk) packages for Alpine Linux. author: "Kevin Brebanov (@kbrebanov)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: available: description: - - During upgrade, reset versioned world dependencies and change logic to prefer replacing or downgrading packages (instead of holding them) - if the currently installed package is no longer available from any repository. + - During upgrade, reset versioned world dependencies and change logic to prefer replacing or downgrading packages (instead + of holding them) if the currently installed package is no longer available from any repository. type: bool default: false name: description: - - A package name, like C(foo), or multiple packages, like C(foo, bar). 
+      - A package name, like V(foo), or multiple packages, like V(foo,bar). +      - Do not include additional whitespace when specifying multiple packages as a string. Prefer YAML lists over comma-separating +        multiple package names.     type: list     elements: str   no_cache: @@ -39,22 +45,22 @@ options:     version_added: 1.0.0   repository:     description: -      - A package repository or multiple repositories. -        Unlike with the underlying apk command, this list will override the system repositories rather than supplement them. +      - A package repository or multiple repositories. Unlike with the underlying apk command, this list overrides the system +        repositories rather than supplements them.     type: list     elements: str   state:     description:       - Indicates the desired package(s) state. -      - C(present) ensures the package(s) is/are present. C(installed) can be used as an alias. -      - C(absent) ensures the package(s) is/are absent. C(removed) can be used as an alias. -      - C(latest) ensures the package(s) is/are present and the latest version(s). +      - V(present) ensures the package(s) is/are present. V(installed) can be used as an alias. +      - V(absent) ensures the package(s) is/are absent. V(removed) can be used as an alias. +      - V(latest) ensures the package(s) is/are present and the latest version(s).     default: present -    choices: [ "present", "absent", "latest", "installed", "removed" ] +    choices: ["present", "absent", "latest", "installed", "removed"]     type: str   update_cache:     description: -      - Update repository indexes. Can be run with other steps or on it's own. +      - Update repository indexes. Can be run with other steps or on its own.     type: bool     default: false   upgrade: @@ -64,16 +70,18 @@ options:     default: false   world:     description: -      - Use a custom world file when checking for explicitly installed packages. +      - Use a custom world file when checking for explicitly installed packages. The file is used only when a value is provided +        for O(name), and O(state) is set to V(present) or V(latest).
type: str default: /etc/apk/world version_added: 5.4.0 notes: - - 'I(name) and I(upgrade) are mutually exclusive.' - - When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the I(name) option. -''' + - O(name) and O(upgrade) are mutually exclusive. + - When used with a C(loop:) each package is processed individually, it is much more efficient to pass the list directly + to the O(name) option. +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Update repositories and install foo package community.general.apk: name: foo @@ -147,15 +155,15 @@ EXAMPLES = ''' name: foo state: latest world: /etc/apk/world.custom -''' +""" -RETURN = ''' +RETURN = r""" packages: - description: a list of packages that have been changed - returned: when packages have changed - type: list - sample: ['package', 'other-package'] -''' + description: A list of packages that have been changed. + returned: when packages have changed + type: list + sample: ["package", "other-package"] +""" import re # Import module snippets. 
@@ -174,7 +182,7 @@ def parse_for_packages(stdout): def update_package_db(module, exit): - cmd = "%s update" % (APK_PATH) + cmd = APK_PATH + ["update"] rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc != 0: module.fail_json(msg="could not update package db", stdout=stdout, stderr=stderr) @@ -197,7 +205,7 @@ def query_toplevel(module, name, world): def query_package(module, name): - cmd = "%s -v info --installed %s" % (APK_PATH, name) + cmd = APK_PATH + ["-v", "info", "--installed", name] rc, stdout, stderr = module.run_command(cmd, check_rc=False) if rc == 0: return True @@ -206,7 +214,7 @@ def query_package(module, name): def query_latest(module, name): - cmd = "%s version %s" % (APK_PATH, name) + cmd = APK_PATH + ["version", name] rc, stdout, stderr = module.run_command(cmd, check_rc=False) search_pattern = r"(%s)-[\d\.\w]+-[\d\w]+\s+(.)\s+[\d\.\w]+-[\d\w]+\s+" % (re.escape(name)) match = re.search(search_pattern, stdout) @@ -216,7 +224,7 @@ def query_latest(module, name): def query_virtual(module, name): - cmd = "%s -v info --description %s" % (APK_PATH, name) + cmd = APK_PATH + ["-v", "info", "--description", name] rc, stdout, stderr = module.run_command(cmd, check_rc=False) search_pattern = r"^%s: virtual meta package" % (re.escape(name)) if re.search(search_pattern, stdout): @@ -225,7 +233,7 @@ def query_virtual(module, name): def get_dependencies(module, name): - cmd = "%s -v info --depends %s" % (APK_PATH, name) + cmd = APK_PATH + ["-v", "info", "--depends", name] rc, stdout, stderr = module.run_command(cmd, check_rc=False) dependencies = stdout.split() if len(dependencies) > 1: @@ -236,11 +244,11 @@ def get_dependencies(module, name): def upgrade_packages(module, available): if module.check_mode: - cmd = "%s upgrade --simulate" % (APK_PATH) + cmd = APK_PATH + ["upgrade", "--simulate"] else: - cmd = "%s upgrade" % (APK_PATH) + cmd = APK_PATH + ["upgrade"] if available: - cmd = "%s --available" % cmd + cmd.append("--available") rc, stdout, 
stderr = module.run_command(cmd, check_rc=False) packagelist = parse_for_packages(stdout) if rc != 0: @@ -271,17 +279,17 @@ def install_packages(module, names, state, world): upgrade = True if not to_install and not upgrade: module.exit_json(changed=False, msg="package(s) already installed") - packages = " ".join(to_install + to_upgrade) + packages = to_install + to_upgrade if upgrade: if module.check_mode: - cmd = "%s add --upgrade --simulate %s" % (APK_PATH, packages) + cmd = APK_PATH + ["add", "--upgrade", "--simulate"] + packages else: - cmd = "%s add --upgrade %s" % (APK_PATH, packages) + cmd = APK_PATH + ["add", "--upgrade"] + packages else: if module.check_mode: - cmd = "%s add --simulate %s" % (APK_PATH, packages) + cmd = APK_PATH + ["add", "--simulate"] + packages else: - cmd = "%s add %s" % (APK_PATH, packages) + cmd = APK_PATH + ["add"] + packages rc, stdout, stderr = module.run_command(cmd, check_rc=False) packagelist = parse_for_packages(stdout) if rc != 0: @@ -296,11 +304,11 @@ def remove_packages(module, names): installed.append(name) if not installed: module.exit_json(changed=False, msg="package(s) already removed") - names = " ".join(installed) + names = installed if module.check_mode: - cmd = "%s del --purge --simulate %s" % (APK_PATH, names) + cmd = APK_PATH + ["del", "--purge", "--simulate"] + names else: - cmd = "%s del --purge %s" % (APK_PATH, names) + cmd = APK_PATH + ["del", "--purge"] + names rc, stdout, stderr = module.run_command(cmd, check_rc=False) packagelist = parse_for_packages(stdout) # Check to see if packages are still present because of dependencies @@ -337,17 +345,20 @@ def main(): module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') global APK_PATH - APK_PATH = module.get_bin_path('apk', required=True) + APK_PATH = [module.get_bin_path('apk', required=True)] p = module.params + if p['name'] and any(not name.strip() for name in p['name']): + module.fail_json(msg="Package name(s) cannot be 
empty or whitespace-only") + if p['no_cache']: - APK_PATH = "%s --no-cache" % (APK_PATH, ) + APK_PATH.append("--no-cache") # add repositories to the APK_PATH if p['repository']: for r in p['repository']: - APK_PATH = "%s --repository %s --repositories-file /dev/null" % (APK_PATH, r) + APK_PATH.extend(["--repository", r, "--repositories-file", "/dev/null"]) # normalize the state parameter if p['state'] in ['present', 'installed']: diff --git a/plugins/modules/apt_repo.py b/plugins/modules/apt_repo.py index 2e9c9b109c..3d6da796b6 100644 --- a/plugins/modules/apt_repo.py +++ b/plugins/modules/apt_repo.py @@ -1,24 +1,28 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2018, Mikhail Gordeev # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: apt_repo -short_description: Manage APT repositories via apt-repo +short_description: Manage APT repositories using C(apt-repo) description: - - Manages APT repositories using apt-repo tool. - - See U(https://www.altlinux.org/Apt-repo) for details about apt-repo + - Manages APT repositories using C(apt-repo) tool. + - See U(https://www.altlinux.org/Apt-repo) for details about C(apt-repo). notes: - - This module works on ALT based distros. - - Does NOT support checkmode, due to a limitation in apt-repo tool. + - This module works on ALT based distros. + - Does NOT support checkmode, due to a limitation in C(apt-repo) tool. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: repo: description: @@ -28,13 +32,13 @@ options: state: description: - Indicates the desired repository state. 
- choices: [ absent, present ] + choices: [absent, present] default: present type: str remove_others: description: - - Remove other then added repositories - - Used if I(state=present) + - Remove repositories other than those added. + - Used if O(state=present). type: bool default: false update: @@ -43,10 +47,10 @@ options: type: bool default: false author: -- Mikhail Gordeev (@obirvalger) -''' + - Mikhail Gordeev (@obirvalger) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Remove all repositories community.general.apt_repo: repo: all @@ -63,9 +67,9 @@ EXAMPLES = ''' repo: copy:///space/ALT/Sisyphus state: present update: true -''' +""" -RETURN = ''' # ''' +RETURN = """ # """ import os diff --git a/plugins/modules/apt_rpm.py b/plugins/modules/apt_rpm.py index d949a61e68..0c64385b1d 100644 --- a/plugins/modules/apt_rpm.py +++ b/plugins/modules/apt_rpm.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2013, Evgenii Terechkov # Written by Evgenii Terechkov @@ -8,39 +7,78 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: apt_rpm short_description: APT-RPM package manager description: - - Manages packages with I(apt-rpm). Both low-level (I(rpm)) and high-level (I(apt-get)) package manager binaries required. + - Manages packages with C(apt-rpm). Both low-level (C(rpm)) and high-level (C(apt-get)) package manager binaries are required. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: package: description: - - list of packages to install, upgrade or remove. - required: true - aliases: [ name, pkg ] + - List of packages to install, upgrade, or remove. 
+ - Since community.general 8.0.0, may include paths to local C(.rpm) files if O(state=installed) or O(state=present), + requires C(rpm) Python module. + aliases: [name, pkg] type: list elements: str state: description: - Indicates the desired package state. - choices: [ absent, present, installed, removed ] + - The states V(latest) and V(present_not_latest) have been added in community.general 8.6.0. + - Please note before community.general 11.0.0, V(present) and V(installed) were equivalent to V(latest). This changed + in community.general 11.0.0. Now they are equivalent to V(present_not_latest). + choices: + - absent + - present + - present_not_latest + - installed + - removed + - latest default: present type: str update_cache: description: - - update the package database first C(apt-get update). + - Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as + a separate step. + - Default is not to update the cache. type: bool default: false + clean: + description: + - Run the equivalent of C(apt-get clean) to clear out the local repository of retrieved package files. It removes everything + but the lock file from C(/var/cache/apt/archives/) and C(/var/cache/apt/archives/partial/). + - Can be run as part of the package installation (clean runs before install) or as a separate step. + type: bool + default: false + version_added: 6.5.0 + dist_upgrade: + description: + - If true performs an C(apt-get dist-upgrade) to upgrade system. + type: bool + default: false + version_added: 6.5.0 + update_kernel: + description: + - If true performs an C(update-kernel) to upgrade kernel packages. + type: bool + default: false + version_added: 6.5.0 +requirements: + - C(rpm) Python package (rpm bindings), optional. Required if O(package) option includes local files. 
author: -- Evgenii Terechkov (@evgkrsk) -''' + - Evgenii Terechkov (@evgkrsk) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install package foo community.general.apt_rpm: pkg: foo @@ -69,42 +107,140 @@ EXAMPLES = ''' name: bar state: present update_cache: true -''' + +- name: Run the equivalent of "apt-get clean" as a separate step + community.general.apt_rpm: + clean: true + +- name: Perform cache update and complete system upgrade (includes kernel) + community.general.apt_rpm: + update_cache: true + dist_upgrade: true + update_kernel: true +""" import os +import re +import traceback -from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.basic import ( + AnsibleModule, + missing_required_lib, +) +from ansible.module_utils.common.text.converters import to_native +try: + import rpm +except ImportError: + HAS_RPM_PYTHON = False + RPM_PYTHON_IMPORT_ERROR = traceback.format_exc() +else: + HAS_RPM_PYTHON = True + RPM_PYTHON_IMPORT_ERROR = None + +APT_CACHE = "/usr/bin/apt-cache" APT_PATH = "/usr/bin/apt-get" RPM_PATH = "/usr/bin/rpm" +APT_GET_ZERO = "\n0 upgraded, 0 newly installed" +UPDATE_KERNEL_ZERO = "\nTry to install new kernel " + + +def local_rpm_package_name(path): + """return package name of a local rpm passed in. 
+ Inspired by ansible.builtin.yum""" + + ts = rpm.TransactionSet() + ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES) + fd = os.open(path, os.O_RDONLY) + try: + header = ts.hdrFromFdno(fd) + except rpm.error as e: + return None + finally: + os.close(fd) + + return to_native(header[rpm.RPMTAG_NAME]) def query_package(module, name): # rpm -q returns 0 if the package is installed, # 1 if it is not installed - rc, out, err = module.run_command("%s -q %s" % (RPM_PATH, name)) + rc, out, err = module.run_command([RPM_PATH, "-q", name]) if rc == 0: return True else: return False -def query_package_provides(module, name): +def check_package_version(module, name): + # compare installed and candidate version + # if newest version already installed return True + # otherwise return False + + rc, out, err = module.run_command([APT_CACHE, "policy", name], environ_update={"LANG": "C"}) + installed = re.split("\n |: ", out)[2] + candidate = re.split("\n |: ", out)[4] + if installed >= candidate: + return True + return False + + +def query_package_provides(module, name, allow_upgrade=False): # rpm -q returns 0 if the package is installed, # 1 if it is not installed - rc, out, err = module.run_command("%s -q --provides %s" % (RPM_PATH, name)) - return rc == 0 + if name.endswith('.rpm'): + # Likely a local RPM file + if not HAS_RPM_PYTHON: + module.fail_json( + msg=missing_required_lib('rpm'), + exception=RPM_PYTHON_IMPORT_ERROR, + ) + + name = local_rpm_package_name(name) + + rc, out, err = module.run_command([RPM_PATH, "-q", "--provides", name]) + if rc == 0: + if not allow_upgrade: + return True + if check_package_version(module, name): + return True + return False def update_package_db(module): - rc, out, err = module.run_command("%s update" % APT_PATH) + rc, update_out, err = module.run_command([APT_PATH, "update"], check_rc=True, environ_update={"LANG": "C"}) + return (False, update_out) - if rc != 0: - module.fail_json(msg="could not update package db: %s" % err) + +def dir_size(module, 
path): + total_size = 0 + for path, dirs, files in os.walk(path): + for f in files: + total_size += os.path.getsize(os.path.join(path, f)) + return total_size + + +def clean(module): + t = dir_size(module, "/var/cache/apt/archives") + rc, out, err = module.run_command([APT_PATH, "clean"], check_rc=True) + return (t != dir_size(module, "/var/cache/apt/archives"), out) + + +def dist_upgrade(module): + rc, out, err = module.run_command([APT_PATH, "-y", "dist-upgrade"], check_rc=True, environ_update={"LANG": "C"}) + return (APT_GET_ZERO not in out, out) + + +def update_kernel(module): + rc, out, err = module.run_command(["/usr/sbin/update-kernel", "-y"], check_rc=True, environ_update={"LANG": "C"}) + return (UPDATE_KERNEL_ZERO not in out, out) def remove_packages(module, packages): + if packages is None: + return (False, "Empty package list") + remove_c = 0 # Using a for loop in case of error, we can report the package that failed for package in packages: @@ -112,7 +248,7 @@ def remove_packages(module, packages): if not query_package(module, package): continue - rc, out, err = module.run_command("%s -y remove %s" % (APT_PATH, package)) + rc, out, err = module.run_command([APT_PATH, "-y", "remove", package], environ_update={"LANG": "C"}) if rc != 0: module.fail_json(msg="failed to remove %s: %s" % (package, err)) @@ -120,42 +256,48 @@ def remove_packages(module, packages): remove_c += 1 if remove_c > 0: - module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) + return (True, "removed %s package(s)" % remove_c) - module.exit_json(changed=False, msg="package(s) already absent") + return (False, "package(s) already absent") -def install_packages(module, pkgspec): +def install_packages(module, pkgspec, allow_upgrade=False): - packages = "" + if pkgspec is None: + return (False, "Empty package list") + + packages = [] for package in pkgspec: - if not query_package_provides(module, package): - packages += "'%s' " % package + if not 
query_package_provides(module, package, allow_upgrade=allow_upgrade): + packages.append(package) - if len(packages) != 0: - - rc, out, err = module.run_command("%s -y install %s" % (APT_PATH, packages)) + if packages: + command = [APT_PATH, "-y", "install"] + packages + rc, out, err = module.run_command(command, environ_update={"LANG": "C"}) installed = True - for packages in pkgspec: - if not query_package_provides(module, package): + for package in pkgspec: + if not query_package_provides(module, package, allow_upgrade=False): installed = False # apt-rpm always have 0 for exit code if --force is used if rc or not installed: - module.fail_json(msg="'apt-get -y install %s' failed: %s" % (packages, err)) + module.fail_json(msg="'%s' failed: %s" % (" ".join(command), err)) else: - module.exit_json(changed=True, msg="%s present(s)" % packages) + return (True, "%s present(s)" % packages) else: - module.exit_json(changed=False) + return (False, "Nothing to install") def main(): module = AnsibleModule( argument_spec=dict( - state=dict(type='str', default='present', choices=['absent', 'installed', 'present', 'removed']), + state=dict(type='str', default='present', choices=['absent', 'installed', 'present', 'removed', 'present_not_latest', 'latest']), update_cache=dict(type='bool', default=False), - package=dict(type='list', elements='str', required=True, aliases=['name', 'pkg']), + clean=dict(type='bool', default=False), + dist_upgrade=dict(type='bool', default=False), + update_kernel=dict(type='bool', default=False), + package=dict(type='list', elements='str', aliases=['name', 'pkg']), ), ) @@ -164,16 +306,39 @@ def main(): p = module.params + modified = False + output = "" + if p['update_cache']: update_package_db(module) + if p['clean']: + (m, out) = clean(module) + modified = modified or m + + if p['dist_upgrade']: + (m, out) = dist_upgrade(module) + modified = modified or m + output += out + + if p['update_kernel']: + (m, out) = update_kernel(module) + modified = 
modified or m + output += out + packages = p['package'] + if p['state'] in ['installed', 'present', 'present_not_latest', 'latest']: + (m, out) = install_packages(module, packages, allow_upgrade=p['state'] == 'latest') + modified = modified or m + output += out - if p['state'] in ['installed', 'present']: - install_packages(module, packages) + if p['state'] in ['absent', 'removed']: + (m, out) = remove_packages(module, packages) + modified = modified or m + output += out - elif p['state'] in ['absent', 'removed']: - remove_packages(module, packages) + # Return total modification status and output of all commands + module.exit_json(changed=modified, msg=output) if __name__ == '__main__': diff --git a/plugins/modules/archive.py b/plugins/modules/archive.py index 83eae34f56..fe850391c8 100644 --- a/plugins/modules/archive.py +++ b/plugins/modules/archive.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2016, Ben Doherty # Sponsored by Oomph, Inc. http://www.oomphinc.com @@ -7,18 +6,23 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: archive short_description: Creates a compressed archive of one or more files or trees -extends_documentation_fragment: files +extends_documentation_fragment: + - files + - community.general.attributes description: - - Creates or extends an archive. - - The source and archive are on the remote host, and the archive I(is not) copied to the local host. - - Source files can be deleted after archival by specifying I(remove=True). + - Creates or extends an archive. + - The source and archive are on the target host, and the archive I(is not) copied to the controller host. 
+ - Source files can be deleted after archival by specifying O(remove=True). +attributes: + check_mode: + support: full + diff_mode: + support: none options: path: description: @@ -29,27 +33,28 @@ options: format: description: - The type of compression to use. - - Support for xz was added in Ansible 2.5. type: str - choices: [ bz2, gz, tar, xz, zip ] + choices: [bz2, gz, tar, xz, zip] default: gz dest: description: - The file name of the destination archive. The parent directory must exists on the remote host. - - This is required when C(path) refers to multiple files by either specifying a glob, a directory or multiple paths in a list. - - If the destination archive already exists, it will be truncated and overwritten. + - This is required when O(path) refers to multiple files by either specifying a glob, a directory or multiple paths + in a list. + - If the destination archive already exists, it is truncated and overwritten. type: path exclude_path: description: - - Remote absolute path, glob, or list of paths or globs for the file or files to exclude from I(path) list and glob expansion. - - Use I(exclusion_patterns) to instead exclude files or subdirectories below any of the paths from the I(path) list. + - Remote absolute path, glob, or list of paths or globs for the file or files to exclude from O(path) list and glob + expansion. + - Use O(exclusion_patterns) to instead exclude files or subdirectories below any of the paths from the O(path) list. type: list elements: path default: [] exclusion_patterns: description: - Glob style patterns to exclude files or directories from the resulting archive. - - This differs from I(exclude_path) which applies only to the source paths from I(path). + - This differs from O(exclude_path) which applies only to the source paths from O(path). 
type: list elements: path version_added: 3.2.0 @@ -66,18 +71,16 @@ options: type: bool default: false notes: - - Can produce I(gzip), I(bzip2), I(lzma), and I(zip) compressed files or archives. - - This module uses C(tarfile), C(zipfile), C(gzip), and C(bz2) packages on the target host to create archives. - These are part of the Python standard library for Python 2 and 3. -requirements: - - Requires C(lzma) (standard library of Python 3) or L(backports.lzma, https://pypi.org/project/backports.lzma/) (Python 2) if using C(xz) format. + - Can produce C(gzip), C(bzip2), C(lzma), and C(zip) compressed files or archives. + - This module uses C(tarfile), C(zipfile), C(gzip), C(bz2), and C(lzma) packages on the target host to create archives. These are + part of the Python standard library. seealso: - - module: ansible.builtin.unarchive + - module: ansible.builtin.unarchive author: - - Ben Doherty (@bendoh) -''' + - Ben Doherty (@bendoh) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Compress directory /path/to/foo/ into /path/to/foo.tgz community.general.archive: path: /path/to/foo @@ -96,28 +99,28 @@ EXAMPLES = r''' - name: Create a bz2 archive of multiple files, rooted at /path community.general.archive: path: - - /path/to/foo - - /path/wong/foo + - /path/to/foo + - /path/wong/foo dest: /path/file.tar.bz2 format: bz2 - name: Create a bz2 archive of a globbed path, while excluding specific dirnames community.general.archive: path: - - /path/to/foo/* + - /path/to/foo/* dest: /path/file.tar.bz2 exclude_path: - - /path/to/foo/bar - - /path/to/foo/baz + - /path/to/foo/bar + - /path/to/foo/baz format: bz2 - name: Create a bz2 archive of a globbed path, while excluding a glob of dirnames community.general.archive: path: - - /path/to/foo/* + - /path/to/foo/* dest: /path/file.tar.bz2 exclude_path: - - /path/to/foo/ba* + - /path/to/foo/ba* format: bz2 - name: Use gzip to compress a single archive (i.e don't archive it first with tar) @@ -132,84 +135,65 @@ EXAMPLES = r''' dest: 
/path/file.tar.gz format: gz force_archive: true -''' +""" -RETURN = r''' +RETURN = r""" state: - description: - The state of the input C(path). - type: str - returned: always + description: The state of the input O(path). + type: str + returned: always dest_state: - description: - - The state of the I(dest) file. - - C(absent) when the file does not exist. - - C(archive) when the file is an archive. - - C(compress) when the file is compressed, but not an archive. - - C(incomplete) when the file is an archive, but some files under I(path) were not found. - type: str - returned: success - version_added: 3.4.0 + description: + - The state of the O(dest) file. + - V(absent) when the file does not exist. + - V(archive) when the file is an archive. + - V(compress) when the file is compressed, but not an archive. + - V(incomplete) when the file is an archive, but some files under O(path) were not found. + type: str + returned: success + version_added: 3.4.0 missing: - description: Any files that were missing from the source. - type: list - returned: success + description: Any files that were missing from the source. + type: list + returned: success archived: - description: Any files that were compressed or added to the archive. - type: list - returned: success + description: Any files that were compressed or added to the archive. + type: list + returned: success arcroot: - description: The archive root. - type: str - returned: always + description: The archive root. + type: str + returned: always expanded_paths: - description: The list of matching paths from paths argument. - type: list - returned: always + description: The list of matching paths from paths argument. + type: list + returned: always expanded_exclude_paths: - description: The list of matching exclude paths from the exclude_path argument. - type: list - returned: always -''' + description: The list of matching exclude paths from the exclude_path argument. 
+ type: list + returned: always +""" import abc import bz2 import glob import gzip import io +import lzma import os import re import shutil import tarfile import zipfile from fnmatch import fnmatch -from sys import version_info from traceback import format_exc +from zipfile import BadZipFile from zlib import crc32 -from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_bytes, to_native -from ansible.module_utils import six -LZMA_IMP_ERR = None -if six.PY3: - try: - import lzma - HAS_LZMA = True - except ImportError: - LZMA_IMP_ERR = format_exc() - HAS_LZMA = False -else: - try: - from backports import lzma - HAS_LZMA = True - except ImportError: - LZMA_IMP_ERR = format_exc() - HAS_LZMA = False - -PY27 = version_info[0:2] >= (2, 7) - STATE_ABSENT = 'absent' STATE_ARCHIVED = 'archive' STATE_COMPRESSED = 'compress' @@ -217,7 +201,7 @@ STATE_INCOMPLETE = 'incomplete' def common_path(paths): - empty = b'' if paths and isinstance(paths[0], six.binary_type) else '' + empty = b'' if paths and isinstance(paths[0], bytes) else '' return os.path.join( os.path.dirname(os.path.commonprefix([os.path.join(os.path.dirname(p), empty) for p in paths])), empty @@ -262,8 +246,7 @@ def _to_native_ascii(s): return to_native(s, errors='surrogate_or_strict', encoding='ascii') -@six.add_metaclass(abc.ABCMeta) -class Archive(object): +class Archive(object, metaclass=abc.ABCMeta): def __init__(self, module): self.module = module @@ -527,7 +510,7 @@ class ZipArchive(Archive): archive = zipfile.ZipFile(_to_native_ascii(path), 'r') checksums = set((info.filename, info.CRC) for info in archive.infolist()) archive.close() - except zipfile.BadZipfile: + except BadZipFile: checksums = set() return checksums @@ -565,23 +548,14 @@ class TarArchive(Archive): self.module.fail_json(msg="%s is not a valid archive format" % self.format) def _add(self, path, archive_name): - 
def py27_filter(tarinfo): + def filter(tarinfo): return None if matches_exclusion_patterns(tarinfo.name, self.exclusion_patterns) else tarinfo - def py26_filter(path): - return matches_exclusion_patterns(path, self.exclusion_patterns) - - if PY27: - self.file.add(path, archive_name, recursive=False, filter=py27_filter) - else: - self.file.add(path, archive_name, recursive=False, exclude=py26_filter) + self.file.add(path, archive_name, recursive=False, filter=filter) def _get_checksums(self, path): - if HAS_LZMA: - LZMAError = lzma.LZMAError - else: - # Just picking another exception that's also listed below - LZMAError = tarfile.ReadError + LZMAError = lzma.LZMAError + try: if self.format == 'xz': with lzma.open(_to_native_ascii(path), 'r') as f: @@ -597,7 +571,13 @@ class TarArchive(Archive): # The python implementations of gzip, bz2, and lzma do not support restoring compressed files # to their original names so only file checksum is returned f = self._open_compressed_file(_to_native_ascii(path), 'r') - checksums = set([(b'', crc32(f.read()))]) + checksum = 0 + while True: + chunk = f.read(16 * 1024 * 1024) + if not chunk: + break + checksum = crc32(chunk, checksum) + checksums = set([(b'', checksum)]) f.close() except Exception: checksums = set() @@ -626,11 +606,6 @@ def main(): supports_check_mode=True, ) - if not HAS_LZMA and module.params['format'] == 'xz': - module.fail_json( - msg=missing_required_lib("lzma or backports.lzma", reason="when using xz format"), exception=LZMA_IMP_ERR - ) - check_mode = module.check_mode archive = get_archive(module) diff --git a/plugins/modules/atomic_container.py b/plugins/modules/atomic_container.py index c32e617a22..9051705f12 100644 --- a/plugins/modules/atomic_container.py +++ b/plugins/modules/atomic_container.py @@ -1,71 +1,77 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # 
SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: atomic_container short_description: Manage the containers on the atomic host platform description: - - Manage the containers on the atomic host platform. - - Allows to manage the lifecycle of a container on the atomic host platform. + - Manage the containers on the atomic host platform. + - Allows to manage the lifecycle of a container on the atomic host platform. +deprecated: + removed_in: 13.0.0 + why: Project Atomic was sunset by the end of 2019. + alternative: There is none. author: "Giuseppe Scrivano (@giuseppe)" -notes: - - Host should support C(atomic) command requirements: - - atomic - - "python >= 2.6" + - atomic +notes: + - According to U(https://projectatomic.io/) the project has been sunset around 2019/2020, in favor of C(podman) and Fedora CoreOS. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - backend: - description: - - Define the backend to use for the container. - required: true - choices: ["docker", "ostree"] - type: str - name: - description: - - Name of the container. - required: true - type: str - image: - description: - - The image to use to install the container. - required: true - type: str - rootfs: - description: - - Define the rootfs of the image. - type: str - state: - description: - - State of the container. - choices: ["absent", "latest", "present", "rollback"] - default: "latest" - type: str - mode: - description: - - Define if it is an user or a system container. - choices: ["user", "system"] - type: str - values: - description: - - Values for the installation of the container. - - This option is permitted only with mode 'user' or 'system'. 
- - The values specified here will be used at installation time as --set arguments for atomic install. - type: list - elements: str - default: [] -''' - -EXAMPLES = r''' + backend: + description: + - Define the backend to use for the container. + required: true + choices: ["docker", "ostree"] + type: str + name: + description: + - Name of the container. + required: true + type: str + image: + description: + - The image to use to install the container. + required: true + type: str + rootfs: + description: + - Define the rootfs of the image. + type: str + state: + description: + - State of the container. + choices: ["absent", "latest", "present", "rollback"] + default: "latest" + type: str + mode: + description: + - Define if it is an user or a system container. + choices: ["user", "system"] + type: str + values: + description: + - Values for the installation of the container. + - This option is permitted only with mode 'user' or 'system'. + - The values specified here will be used at installation time as --set arguments for atomic install. + type: list + elements: str + default: [] +""" +EXAMPLES = r""" - name: Install the etcd system container community.general.atomic_container: name: etcd @@ -74,7 +80,7 @@ EXAMPLES = r''' state: latest mode: system values: - - ETCD_NAME=etcd.server + - ETCD_NAME=etcd.server - name: Uninstall the etcd system container community.general.atomic_container: @@ -83,15 +89,15 @@ EXAMPLES = r''' backend: ostree state: absent mode: system -''' +""" -RETURN = r''' +RETURN = r""" msg: - description: The command standard output - returned: always - type: str - sample: 'Using default tag: latest ...' -''' + description: The command standard output. + returned: always + type: str + sample: 'Using default tag: latest ...' 
+""" # import module snippets import traceback diff --git a/plugins/modules/atomic_host.py b/plugins/modules/atomic_host.py index 5aa389e174..470e65c919 100644 --- a/plugins/modules/atomic_host.py +++ b/plugins/modules/atomic_host.py @@ -1,38 +1,46 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: atomic_host short_description: Manage the atomic host platform description: - - Manage the atomic host platform. - - Rebooting of Atomic host platform should be done outside this module. + - Manage the atomic host platform. + - Rebooting of Atomic host platform should be done outside this module. +deprecated: + removed_in: 13.0.0 + why: Project Atomic was sunset by the end of 2019. + alternative: There is none. author: -- Saravanan KR (@krsacme) + - Saravanan KR (@krsacme) notes: - - Host should be an atomic platform (verified by existence of '/run/ostree-booted' file). + - Host should be an atomic platform (verified by existence of '/run/ostree-booted' file). + - According to U(https://projectatomic.io/) the project has been sunset around 2019/2020, in favor of C(podman) and Fedora CoreOS. requirements: - atomic - - python >= 2.6 +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - revision: - description: - - The version number of the atomic host to be deployed. - - Providing C(latest) will upgrade to the latest available version. - default: 'latest' - aliases: [ version ] - type: str -''' + revision: + description: + - The version number of the atomic host to be deployed. 
+ - Providing V(latest) will upgrade to the latest available version. + default: 'latest' + aliases: [version] + type: str +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Upgrade the atomic host platform to the latest version (atomic host upgrade) community.general.atomic_host: revision: latest @@ -40,15 +48,15 @@ EXAMPLES = r''' - name: Deploy a specific revision as the atomic host (atomic host deploy 23.130) community.general.atomic_host: revision: 23.130 -''' +""" -RETURN = r''' +RETURN = r""" msg: - description: The command standard output - returned: always - type: str - sample: 'Already on latest' -''' + description: The command standard output. + returned: always + type: str + sample: 'Already on latest' +""" import os import traceback diff --git a/plugins/modules/atomic_image.py b/plugins/modules/atomic_image.py index 2705304f01..0c3025b75f 100644 --- a/plugins/modules/atomic_image.py +++ b/plugins/modules/atomic_image.py @@ -1,53 +1,61 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: atomic_image short_description: Manage the container images on the atomic host platform description: - - Manage the container images on the atomic host platform. - - Allows to execute the commands specified by the RUN label in the container image when present. + - Manage the container images on the atomic host platform. + - Allows to execute the commands specified by the RUN label in the container image when present. +deprecated: + removed_in: 13.0.0 + why: Project Atomic was sunset by the end of 2019. + alternative: There is none. 
author: -- Saravanan KR (@krsacme) + - Saravanan KR (@krsacme) notes: - - Host should support C(atomic) command. + - According to U(https://projectatomic.io/) the project has been sunset around 2019/2020, in favor of C(podman) and Fedora CoreOS. requirements: - atomic - - python >= 2.6 +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - backend: - description: - - Define the backend where the image is pulled. - choices: [ 'docker', 'ostree' ] - type: str - name: - description: - - Name of the container image. - required: true - type: str - state: - description: - - The state of the container image. - - The state C(latest) will ensure container image is upgraded to the latest version and forcefully restart container, if running. - choices: [ 'absent', 'latest', 'present' ] - default: 'latest' - type: str - started: - description: - - Start or Stop the container. - type: bool - default: true -''' + backend: + description: + - Define the backend where the image is pulled. + choices: ['docker', 'ostree'] + type: str + name: + description: + - Name of the container image. + required: true + type: str + state: + description: + - The state of the container image. + - The state V(latest) will ensure container image is upgraded to the latest version and forcefully restart container, + if running. + choices: ['absent', 'latest', 'present'] + default: 'latest' + type: str + started: + description: + - Start or stop the container. + type: bool + default: true +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Execute the run command on rsyslog container image (atomic run rhel7/rsyslog) community.general.atomic_image: name: rhel7/rsyslog @@ -58,15 +66,15 @@ EXAMPLES = r''' name: busybox state: latest backend: ostree -''' +""" -RETURN = r''' +RETURN = r""" msg: - description: The command standard output - returned: always - type: str - sample: 'Using default tag: latest ...' 
-''' + description: The command standard output. + returned: always + type: str + sample: 'Using default tag: latest ...' +""" import traceback from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/awall.py b/plugins/modules/awall.py index dc13d46789..37e1e87a1c 100644 --- a/plugins/modules/awall.py +++ b/plugins/modules/awall.py @@ -1,23 +1,27 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017, Ted Trask # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: awall short_description: Manage awall policies author: Ted Trask (@tdtrask) description: - - This modules allows for enable/disable/activate of I(awall) policies. - - Alpine Wall (I(awall)) generates a firewall configuration from the enabled policy files - and activates the configuration on the system. + - This modules allows for enable/disable/activate of C(awall) policies. + - Alpine Wall (C(awall)) generates a firewall configuration from the enabled policy files and activates the configuration + on the system. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: description: @@ -28,36 +32,39 @@ options: description: - Whether the policies should be enabled or disabled. type: str - choices: [ disabled, enabled ] + choices: [disabled, enabled] default: enabled activate: description: - Activate the new firewall rules. - Can be run with other steps or on its own. + - Idempotency is affected if O(activate=true), as the module always reports a changed state. type: bool default: false -''' +notes: + - At least one of O(name) and O(activate) is required. 
+""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Enable "foo" and "bar" policy community.general.awall: - name: [ foo bar ] + name: [foo bar] state: enabled - name: Disable "foo" and "bar" policy and activate new rules community.general.awall: name: - - foo - - bar + - foo + - bar state: disabled activate: false - name: Activate currently enabled firewall rules community.general.awall: activate: true -''' +""" -RETURN = ''' # ''' +RETURN = """ # """ import re from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/beadm.py b/plugins/modules/beadm.py index 7a84997089..f285616ca7 100644 --- a/plugins/modules/beadm.py +++ b/plugins/modules/beadm.py @@ -1,63 +1,65 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2016, Adam Števko # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: beadm short_description: Manage ZFS boot environments on FreeBSD/Solaris/illumos systems description: - - Create, delete or activate ZFS boot environments. - - Mount and unmount ZFS boot environments. + - Create, delete or activate ZFS boot environments. + - Mount and unmount ZFS boot environments. author: Adam Števko (@xen0l) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - ZFS boot environment name. - type: str - required: true - aliases: [ "be" ] - snapshot: - description: - - If specified, the new boot environment will be cloned from the given - snapshot or inactive boot environment. - type: str + name: description: - description: - - Associate a description with a new boot environment. This option is - available only on Solarish platforms. 
- type: str - options: - description: - - Create the datasets for new BE with specific ZFS properties. - - Multiple options can be specified. - - This option is available only on Solarish platforms. - type: str - mountpoint: - description: - - Path where to mount the ZFS boot environment. - type: path - state: - description: - - Create or delete ZFS boot environment. - type: str - choices: [ absent, activated, mounted, present, unmounted ] - default: present - force: - description: - - Specifies if the unmount should be forced. - type: bool - default: false -''' + - ZFS boot environment name. + type: str + required: true + aliases: ["be"] + snapshot: + description: + - If specified, the new boot environment is cloned from the given snapshot or inactive boot environment. + type: str + description: + description: + - Associate a description with a new boot environment. This option is available only on Solarish platforms. + type: str + options: + description: + - Create the datasets for new BE with specific ZFS properties. + - Multiple options can be specified. + - This option is available only on Solarish platforms. + type: str + mountpoint: + description: + - Path where to mount the ZFS boot environment. + type: path + state: + description: + - Create or delete ZFS boot environment. + type: str + choices: [absent, activated, mounted, present, unmounted] + default: present + force: + description: + - Specifies if the unmount should be forced. + type: bool + default: false +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create ZFS boot environment community.general.beadm: name: upgrade-be @@ -96,45 +98,45 @@ EXAMPLES = r''' community.general.beadm: name: upgrade-be state: activated -''' +""" -RETURN = r''' +RETURN = r""" name: - description: BE name - returned: always - type: str - sample: pre-upgrade + description: BE name. 
+ returned: always + type: str + sample: pre-upgrade snapshot: - description: ZFS snapshot to create BE from - returned: always - type: str - sample: rpool/ROOT/oi-hipster@fresh + description: ZFS snapshot to create BE from. + returned: always + type: str + sample: rpool/ROOT/oi-hipster@fresh description: - description: BE description - returned: always - type: str - sample: Upgrade from 9.0 to 10.0 + description: BE description. + returned: always + type: str + sample: Upgrade from 9.0 to 10.0 options: - description: BE additional options - returned: always - type: str - sample: compression=on + description: BE additional options. + returned: always + type: str + sample: compression=on mountpoint: - description: BE mountpoint - returned: always - type: str - sample: /mnt/be + description: BE mountpoint. + returned: always + type: str + sample: /mnt/be state: - description: state of the target - returned: always - type: str - sample: present + description: State of the target. + returned: always + type: str + sample: present force: - description: If forced action is wanted - returned: always - type: bool - sample: false -''' + description: If forced action is wanted. 
+ returned: always + type: bool + sample: false +""" import os from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/bearychat.py b/plugins/modules/bearychat.py deleted file mode 100644 index 48d5c994fc..0000000000 --- a/plugins/modules/bearychat.py +++ /dev/null @@ -1,168 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# Copyright (c) 2016, Jiangge Zhang -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' -module: bearychat -short_description: Send BearyChat notifications -description: - - The M(community.general.bearychat) module sends notifications to U(https://bearychat.com) - via the Incoming Robot integration. -author: "Jiangge Zhang (@tonyseek)" -options: - url: - type: str - description: - - BearyChat WebHook URL. This authenticates you to the bearychat - service. It looks like - C(https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60). - required: true - text: - type: str - description: - - Message to send. - markdown: - description: - - If C(true), text will be parsed as markdown. - default: true - type: bool - channel: - type: str - description: - - Channel to send the message to. If absent, the message goes to the - default channel selected by the I(url). - attachments: - type: list - elements: dict - description: - - Define a list of attachments. 
For more information, see - https://github.com/bearyinnovative/bearychat-tutorial/blob/master/robots/incoming.md#attachments -''' - -EXAMPLES = """ -- name: Send notification message via BearyChat - local_action: - module: bearychat - url: | - https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60 - text: "{{ inventory_hostname }} completed" - -- name: Send notification message via BearyChat all options - local_action: - module: bearychat - url: | - https://hook.bearychat.com/=ae2CF/incoming/e61bd5c57b164e04b11ac02e66f47f60 - text: "{{ inventory_hostname }} completed" - markdown: false - channel: "#ansible" - attachments: - - title: "Ansible on {{ inventory_hostname }}" - text: "May the Force be with you." - color: "#ffffff" - images: - - http://example.com/index.png -""" - -RETURN = """ -msg: - description: execution result - returned: success - type: str - sample: "OK" -""" - -try: - from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse - HAS_URLPARSE = True -except Exception: - HAS_URLPARSE = False -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.urls import fetch_url - - -def build_payload_for_bearychat(module, text, markdown, channel, attachments): - payload = {} - if text is not None: - payload['text'] = text - if markdown is not None: - payload['markdown'] = markdown - if channel is not None: - payload['channel'] = channel - if attachments is not None: - payload.setdefault('attachments', []).extend( - build_payload_for_bearychat_attachment( - module, item.get('title'), item.get('text'), item.get('color'), - item.get('images')) - for item in attachments) - payload = 'payload=%s' % module.jsonify(payload) - return payload - - -def build_payload_for_bearychat_attachment(module, title, text, color, images): - attachment = {} - if title is not None: - attachment['title'] = title - if text is not None: - attachment['text'] = text - if color is not None: - attachment['color'] = color - if 
images is not None: - target_images = attachment.setdefault('images', []) - if not isinstance(images, (list, tuple)): - images = [images] - for image in images: - if isinstance(image, dict) and 'url' in image: - image = {'url': image['url']} - elif hasattr(image, 'startswith') and image.startswith('http'): - image = {'url': image} - else: - module.fail_json( - msg="BearyChat doesn't have support for this kind of " - "attachment image") - target_images.append(image) - return attachment - - -def do_notify_bearychat(module, url, payload): - response, info = fetch_url(module, url, data=payload) - if info['status'] != 200: - url_info = urlparse(url) - obscured_incoming_webhook = urlunparse( - (url_info.scheme, url_info.netloc, '[obscured]', '', '', '')) - module.fail_json( - msg=" failed to send %s to %s: %s" % ( - payload, obscured_incoming_webhook, info['msg'])) - - -def main(): - module = AnsibleModule(argument_spec={ - 'url': dict(type='str', required=True, no_log=True), - 'text': dict(type='str'), - 'markdown': dict(default=True, type='bool'), - 'channel': dict(type='str'), - 'attachments': dict(type='list', elements='dict'), - }) - - if not HAS_URLPARSE: - module.fail_json(msg='urlparse is not installed') - - url = module.params['url'] - text = module.params['text'] - markdown = module.params['markdown'] - channel = module.params['channel'] - attachments = module.params['attachments'] - - payload = build_payload_for_bearychat( - module, text, markdown, channel, attachments) - do_notify_bearychat(module, url, payload) - - module.exit_json(msg="OK") - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/bigpanda.py b/plugins/modules/bigpanda.py index 4e653aadb7..1bdd79d548 100644 --- a/plugins/modules/bigpanda.py +++ b/plugins/modules/bigpanda.py @@ -1,25 +1,30 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # 
SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: bigpanda author: "Hagai Kariti (@hkariti)" short_description: Notify BigPanda about deployments description: - - Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters for future module calls. + - Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters + for future module calls. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: component: type: str description: - - "The name of the component being deployed. Ex: billing" + - 'The name of the component being deployed. Ex: V(billing).' required: true aliases: ['name'] version: @@ -48,7 +53,7 @@ options: env: type: str description: - - The environment name, typically 'production', 'staging', etc. + - The environment name, typically V(production), V(staging), and so on. required: false owner: type: str @@ -65,30 +70,30 @@ options: description: - Base URL of the API server. required: false - default: https://api.bigpanda.io + default: "https://api.bigpanda.io" validate_certs: description: - - If C(false), SSL certificates for the target url will not be validated. This should only be used - on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates for the target URL are not validated. This should only be used on personally controlled + sites using self-signed certificates. required: false default: true type: bool deployment_message: type: str description: - - Message about the deployment. + - Message about the deployment. 
version_added: '0.2.0' source_system: type: str description: - - Source system used in the requests to the API + - Source system used in the requests to the API. default: ansible # informational: requirements for nodes -requirements: [ ] -''' +requirements: [] +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Notify BigPanda about a deployment community.general.bigpanda: component: myapp @@ -121,7 +126,7 @@ EXAMPLES = ''' token: '{{ deployment.token }}' state: finished delegate_to: localhost -''' +""" # =========================================== # Module execution. @@ -143,14 +148,14 @@ def main(): version=dict(required=True), token=dict(required=True, no_log=True), state=dict(required=True, choices=['started', 'finished', 'failed']), - hosts=dict(required=False, aliases=['host']), - env=dict(required=False), - owner=dict(required=False), - description=dict(required=False), - deployment_message=dict(required=False), - source_system=dict(required=False, default='ansible'), + hosts=dict(aliases=['host']), + env=dict(), + owner=dict(), + description=dict(), + deployment_message=dict(), + source_system=dict(default='ansible'), validate_certs=dict(default=True, type='bool'), - url=dict(required=False, default='https://api.bigpanda.io'), + url=dict(default='https://api.bigpanda.io'), ), supports_check_mode=True, ) diff --git a/plugins/modules/bitbucket_access_key.py b/plugins/modules/bitbucket_access_key.py index 0708777a0a..2b2bf9b8c5 100644 --- a/plugins/modules/bitbucket_access_key.py +++ b/plugins/modules/bitbucket_access_key.py @@ -1,15 +1,12 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2019, Evgeniy Krysanov # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: 
bitbucket_access_key short_description: Manages Bitbucket repository access keys description: @@ -18,6 +15,12 @@ author: - Evgeniy Krysanov (@catcombo) extends_documentation_fragment: - community.general.bitbucket + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: repository: description: @@ -27,7 +30,7 @@ options: workspace: description: - The repository owner. - - I(username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of I(user). + - B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user). type: str required: true key: @@ -44,13 +47,13 @@ options: - Indicates desired state of the access key. type: str required: true - choices: [ absent, present ] + choices: [absent, present] notes: - Bitbucket OAuth consumer or App password should have permissions to read and administrate account repositories. - Check mode is supported. -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create access key community.general.bitbucket_access_key: repository: 'bitbucket-repo' @@ -65,9 +68,9 @@ EXAMPLES = r''' workspace: bitbucket_workspace label: Bitbucket state: absent -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper @@ -152,7 +155,7 @@ def get_existing_deploy_key(module, bitbucket): if info['status'] != 200: module.fail_json(msg='Failed to retrieve the list of deploy keys: {0}'.format(info)) - res = next(iter(filter(lambda v: v['label'] == module.params['label'], content['values'])), None) + res = next((v for v in content['values'] if v['label'] == module.params['label']), None) if res is not None: return res diff --git a/plugins/modules/bitbucket_pipeline_key_pair.py b/plugins/modules/bitbucket_pipeline_key_pair.py index db4453d45c..28d837c914 100644 
--- a/plugins/modules/bitbucket_pipeline_key_pair.py +++ b/plugins/modules/bitbucket_pipeline_key_pair.py @@ -1,15 +1,12 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2019, Evgeniy Krysanov # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: bitbucket_pipeline_key_pair short_description: Manages Bitbucket pipeline SSH key pair description: @@ -18,6 +15,12 @@ author: - Evgeniy Krysanov (@catcombo) extends_documentation_fragment: - community.general.bitbucket + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: repository: description: @@ -27,7 +30,7 @@ options: workspace: description: - The repository owner. - - I(username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of I(user). + - B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user). type: str required: true public_key: @@ -43,12 +46,12 @@ options: - Indicates desired state of the key pair. type: str required: true - choices: [ absent, present ] + choices: [absent, present] notes: - Check mode is supported. 
-''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create or update SSH key pair community.general.bitbucket_pipeline_key_pair: repository: 'bitbucket-repo' @@ -62,9 +65,9 @@ EXAMPLES = r''' repository: bitbucket-repo workspace: bitbucket_workspace state: absent -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper diff --git a/plugins/modules/bitbucket_pipeline_known_host.py b/plugins/modules/bitbucket_pipeline_known_host.py index e573719362..fb382c8afb 100644 --- a/plugins/modules/bitbucket_pipeline_known_host.py +++ b/plugins/modules/bitbucket_pipeline_known_host.py @@ -1,26 +1,29 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2019, Evgeniy Krysanov # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: bitbucket_pipeline_known_host short_description: Manages Bitbucket pipeline known hosts description: - Manages Bitbucket pipeline known hosts under the "SSH Keys" menu. - - The host fingerprint will be retrieved automatically, but in case of an error, one can use I(key) field to specify it manually. + - The host fingerprint is retrieved automatically, but in case of an error, one can use O(key) field to specify it manually. author: - Evgeniy Krysanov (@catcombo) extends_documentation_fragment: - community.general.bitbucket + - community.general.attributes requirements: - - paramiko + - paramiko +attributes: + check_mode: + support: full + diff_mode: + support: none options: repository: description: @@ -30,7 +33,7 @@ options: workspace: description: - The repository owner. 
- - I(username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of I(user). + - B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user). type: str required: true name: @@ -47,12 +50,12 @@ options: - Indicates desired state of the record. type: str required: true - choices: [ absent, present ] + choices: [absent, present] notes: - Check mode is supported. -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create known hosts from the list community.general.bitbucket_pipeline_known_host: repository: 'bitbucket-repo' @@ -77,9 +80,9 @@ EXAMPLES = r''' name: bitbucket.org key: '{{lookup("file", "bitbucket.pub") }}' state: absent -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ import socket @@ -148,7 +151,7 @@ def get_existing_known_host(module, bitbucket): if info['status'] != 200: module.fail_json(msg='Failed to retrieve list of known hosts: {0}'.format(info)) - host = next(filter(lambda v: v['hostname'] == module.params['name'], content['values']), None) + host = next((v for v in content['values'] if v['hostname'] == module.params['name']), None) if host is not None: return host diff --git a/plugins/modules/bitbucket_pipeline_variable.py b/plugins/modules/bitbucket_pipeline_variable.py index 45661d8dee..ea43beba55 100644 --- a/plugins/modules/bitbucket_pipeline_variable.py +++ b/plugins/modules/bitbucket_pipeline_variable.py @@ -1,15 +1,12 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2019, Evgeniy Krysanov # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: bitbucket_pipeline_variable short_description: Manages Bitbucket pipeline variables description: @@ -18,6 +15,12 
@@ author: - Evgeniy Krysanov (@catcombo) extends_documentation_fragment: - community.general.bitbucket + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: repository: description: @@ -27,7 +30,7 @@ options: workspace: description: - The repository owner. - - I(username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of I(user). + - B(Note:) O(ignore:username) used to be an alias of this option. Since community.general 6.0.0 it is an alias of O(user). type: str required: true name: @@ -49,13 +52,13 @@ options: - Indicates desired state of the variable. type: str required: true - choices: [ absent, present ] + choices: [absent, present] notes: - Check mode is supported. - - For secured values return parameter C(changed) is always C(True). -''' + - For secured values return parameter C(changed) is always V(true). +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create or update pipeline variables from the list community.general.bitbucket_pipeline_variable: repository: 'bitbucket-repo' @@ -65,8 +68,8 @@ EXAMPLES = r''' secured: '{{ item.secured }}' state: present with_items: - - { name: AWS_ACCESS_KEY, value: ABCD1234, secured: false } - - { name: AWS_SECRET, value: qwe789poi123vbn0, secured: true } + - {name: AWS_ACCESS_KEY, value: ABCD1234, secured: false} + - {name: AWS_SECRET, value: qwe789poi123vbn0, secured: true} - name: Remove pipeline variable community.general.bitbucket_pipeline_variable: @@ -74,9 +77,9 @@ EXAMPLES = r''' workspace: bitbucket_workspace name: AWS_ACCESS_KEY state: absent -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ from ansible.module_utils.basic import AnsibleModule, _load_params from ansible_collections.community.general.plugins.module_utils.source_control.bitbucket import BitbucketHelper @@ -136,7 +139,7 @@ def get_existing_pipeline_variable(module, bitbucket): return None page += 1 - var = next(filter(lambda v: v['key'] == 
module.params['name'], content['values']), None) + var = next((v for v in content['values'] if v['key'] == module.params['name']), None) if var is not None: var['name'] = var.pop('key') diff --git a/plugins/modules/bootc_manage.py b/plugins/modules/bootc_manage.py new file mode 100644 index 0000000000..d854f866bf --- /dev/null +++ b/plugins/modules/bootc_manage.py @@ -0,0 +1,92 @@ +#!/usr/bin/python + +# Copyright (c) 2024, Ryan Cook +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt +# or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: bootc_manage +version_added: 9.3.0 +author: + - Ryan Cook (@cooktheryan) +short_description: Bootc Switch and Upgrade +description: + - This module manages the switching and upgrading of C(bootc). +options: + state: + description: + - Control whether to apply the latest image or switch the image. + - B(Note:) This does not reboot the system. + - Please use M(ansible.builtin.reboot) to reboot the system. + required: true + type: str + choices: ['switch', 'latest'] + image: + description: + - The image to switch to. + - This is required when O(state=switch). 
+ required: false + type: str +""" + +EXAMPLES = r""" +# Switch to a different image +- name: Provide image to switch to a different image and retain the current running image + community.general.bootc_manage: + state: switch + image: "example.com/image:latest" + +# Apply updates of the current running image +- name: Apply updates of the current running image + community.general.bootc_manage: + state: latest +""" + +RETURN = r""" +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.locale import get_best_parsable_locale + + +def main(): + argument_spec = dict( + state=dict(type='str', required=True, choices=['switch', 'latest']), + image=dict(type='str'), + ) + module = AnsibleModule( + argument_spec=argument_spec, + required_if=[ + ('state', 'switch', ['image']), + ], + ) + + state = module.params['state'] + image = module.params['image'] + + if state == 'switch': + command = ['bootc', 'switch', image, '--retain'] + elif state == 'latest': + command = ['bootc', 'upgrade'] + + locale = get_best_parsable_locale(module) + module.run_command_environ_update = dict(LANG=locale, LC_ALL=locale, LC_MESSAGES=locale, LC_CTYPE=locale, LANGUAGE=locale) + rc, stdout, err = module.run_command(command, check_rc=True) + + if 'Queued for next boot: ' in stdout: + result = {'changed': True, 'stdout': stdout} + module.exit_json(**result) + elif 'No changes in ' in stdout or 'Image specification is unchanged.' 
in stdout: + result = {'changed': False, 'stdout': stdout} + module.exit_json(**result) + else: + result = {'changed': False, 'stderr': err} + module.fail_json(msg='ERROR: Command execution failed.', **result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/bower.py b/plugins/modules/bower.py index 8964bd0434..fd4e2c4920 100644 --- a/plugins/modules/bower.py +++ b/plugins/modules/bower.py @@ -1,58 +1,62 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2014, Michael Warkentin # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: bower -short_description: Manage bower packages with bower +short_description: Manage bower packages with C(bower) description: - - Manage bower packages with bower + - Manage bower packages with C(bower). author: "Michael Warkentin (@mwarkentin)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: name: type: str description: - - The name of a bower package to install + - The name of a bower package to install. offline: description: - - Install packages from local cache, if the packages were installed before + - Install packages from local cache, if the packages were installed before. type: bool default: false production: description: - - Install with --production flag + - Install with C(--production) flag. type: bool default: false path: type: path description: - - The base path where to install the bower packages + - The base path where to install the bower packages. 
required: true relative_execpath: type: path description: - - Relative path to bower executable from install path + - Relative path to bower executable from install path. state: type: str description: - - The state of the bower package + - The state of the bower package. default: present - choices: [ "present", "absent", "latest" ] + choices: ["present", "absent", "latest"] version: type: str description: - - The version to be installed -''' + - The version to be installed. +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install "bootstrap" bower package. community.general.bower: name: bootstrap @@ -84,7 +88,8 @@ EXAMPLES = ''' - community.general.bower: path: /app/location relative_execpath: node_modules/.bin -''' +""" + import json import os @@ -180,13 +185,13 @@ class Bower(object): def main(): arg_spec = dict( - name=dict(default=None), + name=dict(), offline=dict(default=False, type='bool'), production=dict(default=False, type='bool'), path=dict(required=True, type='path'), - relative_execpath=dict(default=None, required=False, type='path'), + relative_execpath=dict(type='path'), state=dict(default='present', choices=['present', 'absent', 'latest', ]), - version=dict(default=None), + version=dict(), ) module = AnsibleModule( argument_spec=arg_spec diff --git a/plugins/modules/btrfs_info.py b/plugins/modules/btrfs_info.py new file mode 100644 index 0000000000..e05b6e6c6d --- /dev/null +++ b/plugins/modules/btrfs_info.py @@ -0,0 +1,103 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Gregory Furlong +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: btrfs_info +short_description: Query btrfs filesystem info +version_added: "6.6.0" +description: Query status of available btrfs filesystems, including UUID, label, subvolumes and mountpoints. 
+
+author:
+  - Gregory Furlong (@gnfzdz)
+
+extends_documentation_fragment:
+  - community.general.attributes
+  - community.general.attributes.info_module
+"""
+
+EXAMPLES = r"""
+- name: Query information about mounted btrfs filesystems
+  community.general.btrfs_info:
+  register: my_btrfs_info
+"""
+
+RETURN = r"""
+filesystems:
+  description: Summaries of the current state for all btrfs filesystems found on the target host.
+  type: list
+  elements: dict
+  returned: success
+  contains:
+    uuid:
+      description: A unique identifier assigned to the filesystem.
+      type: str
+      sample: 96c9c605-1454-49b8-a63a-15e2584c208e
+    label:
+      description: An optional label assigned to the filesystem.
+      type: str
+      sample: Tank
+    devices:
+      description: A list of devices assigned to the filesystem.
+      type: list
+      sample:
+        - /dev/sda1
+        - /dev/sdb1
+    default_subvolume:
+      description: The ID of the filesystem's default subvolume.
+      type: int
+      sample: 5
+    subvolumes:
+      description: A list of dicts containing metadata for all of the filesystem's subvolumes.
+      type: list
+      elements: dict
+      contains:
+        id:
+          description: An identifier assigned to the subvolume, unique within the containing filesystem.
+          type: int
+          sample: 256
+        mountpoints:
+          description: Paths where the subvolume is mounted on the targeted host.
+          type: list
+          sample: ["/home"]
+        parent:
+          description: The identifier of this subvolume's parent.
+          type: int
+          sample: 5
+        path:
+          description: The full path of the subvolume relative to the btrfs filesystem's root.
+ type: str + sample: /@home +""" + + +from ansible_collections.community.general.plugins.module_utils.btrfs import BtrfsFilesystemsProvider +from ansible.module_utils.basic import AnsibleModule + + +def run_module(): + module_args = dict() + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True + ) + + provider = BtrfsFilesystemsProvider(module) + filesystems = [x.get_summary() for x in provider.get_filesystems()] + result = { + "filesystems": filesystems, + } + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/btrfs_subvolume.py b/plugins/modules/btrfs_subvolume.py new file mode 100644 index 0000000000..92c3c99c02 --- /dev/null +++ b/plugins/modules/btrfs_subvolume.py @@ -0,0 +1,676 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Gregory Furlong +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: btrfs_subvolume +short_description: Manage btrfs subvolumes +version_added: "6.6.0" + +description: Creates, updates and deletes btrfs subvolumes and snapshots. + +options: + automount: + description: + - Allow the module to temporarily mount the targeted btrfs filesystem in order to validate the current state and make + any required changes. + type: bool + default: false + default: + description: + - Make the subvolume specified by O(name) the filesystem's default subvolume. + type: bool + default: false + filesystem_device: + description: + - A block device contained within the btrfs filesystem to be targeted. + - Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted. + type: path + filesystem_label: + description: + - A descriptive label assigned to the btrfs filesystem to be targeted. 
+      - Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
+    type: str
+  filesystem_uuid:
+    description:
+      - A unique identifier assigned to the btrfs filesystem to be targeted.
+      - Useful when multiple btrfs filesystems are present to specify which filesystem should be targeted.
+    type: str
+  name:
+    description:
+      - Name of the subvolume/snapshot to be targeted.
+    required: true
+    type: str
+  recursive:
+    description:
+      - When true, indicates that parent/child subvolumes should be created/removed as necessary to complete the operation
+        (for O(state=present) and O(state=absent) respectively).
+    type: bool
+    default: false
+  snapshot_source:
+    description:
+      - Identifies the source subvolume for the created snapshot.
+      - Infers that the created subvolume is a snapshot.
+    type: str
+  snapshot_conflict:
+    description:
+      - Policy defining behavior when a subvolume already exists at the path of the requested snapshot.
+      - V(skip) - Create a snapshot only if a subvolume does not yet exist at the target location, otherwise indicate that
+        no change is required. Warning, this option does not yet verify that the target subvolume was generated from a snapshot
+        of the requested source.
+      - V(clobber) - If a subvolume already exists at the requested location, delete it first. This option is not idempotent
+        and results in a new snapshot being generated on every execution.
+      - V(error) - If a subvolume already exists at the requested location, return an error. This option is not idempotent
+        and results in an error on replay of the module.
+    type: str
+    choices: [skip, clobber, error]
+    default: skip
+  state:
+    description:
+      - Indicates the current state of the targeted subvolume.
+    type: str
+    choices: [absent, present]
+    default: present
+
+notes:
+  - If any or all of the options O(filesystem_device), O(filesystem_label) or O(filesystem_uuid) parameters are provided,
+    there is expected to be a matching btrfs filesystem.
If none are provided and only a single btrfs filesystem exists or + only a single btrfs filesystem is mounted, that filesystem is used; otherwise, the module takes no action and returns an + error. +extends_documentation_fragment: + - community.general.attributes + +attributes: + check_mode: + support: partial + details: + - In some scenarios it may erroneously report intermediate subvolumes being created. After mounting, if a directory + like file is found where the subvolume would have been created, the operation is skipped. + diff_mode: + support: none + +author: + - Gregory Furlong (@gnfzdz) +""" + +EXAMPLES = r""" +- name: Create a @home subvolume under the root subvolume + community.general.btrfs_subvolume: + name: /@home + filesystem_device: /dev/vda2 + +- name: Remove the @home subvolume if it exists + community.general.btrfs_subvolume: + name: /@home + state: absent + filesystem_device: /dev/vda2 + +- name: Create a snapshot of the root subvolume named @ + community.general.btrfs_subvolume: + name: /@ + snapshot_source: / + filesystem_device: /dev/vda2 + +- name: Create a snapshot of the root subvolume and make it the new default subvolume + community.general.btrfs_subvolume: + name: /@ + snapshot_source: / + default: true + filesystem_device: /dev/vda2 + +- name: Create a snapshot of the /@ subvolume and recursively creating intermediate subvolumes as required + community.general.btrfs_subvolume: + name: /@snapshots/@2022_06_09 + snapshot_source: /@ + recursive: true + filesystem_device: /dev/vda2 + +- name: Remove the /@ subvolume and recursively delete child subvolumes as required + community.general.btrfs_subvolume: + name: /@snapshots/@2022_06_09 + snapshot_source: /@ + recursive: true + filesystem_device: /dev/vda2 +""" + +RETURN = r""" +filesystem: + description: + - A summary of the final state of the targeted btrfs filesystem. + type: dict + returned: success + contains: + uuid: + description: A unique identifier assigned to the filesystem. 
+      returned: success
+      type: str
+      sample: 96c9c605-1454-49b8-a63a-15e2584c208e
+    label:
+      description: An optional label assigned to the filesystem.
+      returned: success
+      type: str
+      sample: Tank
+    devices:
+      description: A list of devices assigned to the filesystem.
+      returned: success
+      type: list
+      sample:
+        - /dev/sda1
+        - /dev/sdb1
+    default_subvolume:
+      description: The ID of the filesystem's default subvolume.
+      returned: success and if filesystem is mounted
+      type: int
+      sample: 5
+    subvolumes:
+      description: A list of dicts containing metadata for all of the filesystem's subvolumes.
+      returned: success and if filesystem is mounted
+      type: list
+      elements: dict
+      contains:
+        id:
+          description: An identifier assigned to the subvolume, unique within the containing filesystem.
+          type: int
+          sample: 256
+        mountpoints:
+          description: Paths where the subvolume is mounted on the targeted host.
+          type: list
+          sample: ["/home"]
+        parent:
+          description: The identifier of this subvolume's parent.
+          type: int
+          sample: 5
+        path:
+          description: The full path of the subvolume relative to the btrfs filesystem's root.
+          type: str
+          sample: /@home
+
+modifications:
+  description:
+    - A list where each element describes a change made to the target btrfs filesystem.
+  type: list
+  returned: Success
+  elements: str
+
+target_subvolume_id:
+  description:
+    - The ID of the subvolume specified with the O(name) parameter, either pre-existing or created as part of module execution.
+ type: int + sample: 257 + returned: Success and subvolume exists after module execution +""" + +from ansible_collections.community.general.plugins.module_utils.btrfs import BtrfsFilesystemsProvider, BtrfsCommands, BtrfsModuleException +from ansible_collections.community.general.plugins.module_utils.btrfs import normalize_subvolume_path +from ansible.module_utils.basic import AnsibleModule +import os +import tempfile + + +class BtrfsSubvolumeModule(object): + + __BTRFS_ROOT_SUBVOLUME = '/' + __BTRFS_ROOT_SUBVOLUME_ID = 5 + __BTRFS_SUBVOLUME_INODE_NUMBER = 256 + + __CREATE_SUBVOLUME_OPERATION = 'create' + __CREATE_SNAPSHOT_OPERATION = 'snapshot' + __DELETE_SUBVOLUME_OPERATION = 'delete' + __SET_DEFAULT_SUBVOLUME_OPERATION = 'set-default' + + __UNKNOWN_SUBVOLUME_ID = '?' + + def __init__(self, module): + self.module = module + self.__btrfs_api = BtrfsCommands(module) + self.__provider = BtrfsFilesystemsProvider(module) + + # module parameters + name = self.module.params['name'] + self.__name = normalize_subvolume_path(name) if name is not None else None + self.__state = self.module.params['state'] + + self.__automount = self.module.params['automount'] + self.__default = self.module.params['default'] + self.__filesystem_device = self.module.params['filesystem_device'] + self.__filesystem_label = self.module.params['filesystem_label'] + self.__filesystem_uuid = self.module.params['filesystem_uuid'] + self.__recursive = self.module.params['recursive'] + self.__snapshot_conflict = self.module.params['snapshot_conflict'] + snapshot_source = self.module.params['snapshot_source'] + self.__snapshot_source = normalize_subvolume_path(snapshot_source) if snapshot_source is not None else None + + # execution state + self.__filesystem = None + self.__required_mounts = [] + self.__unit_of_work = [] + self.__completed_work = [] + self.__temporary_mounts = dict() + + def run(self): + error = None + try: + self.__load_filesystem() + self.__prepare_unit_of_work() + + if not 
self.module.check_mode: + # check required mounts & mount + if len(self.__unit_of_work) > 0: + self.__execute_unit_of_work() + self.__filesystem.refresh() + else: + # check required mounts + self.__completed_work.extend(self.__unit_of_work) + except Exception as e: + error = e + finally: + self.__cleanup_mounts() + if self.__filesystem is not None: + self.__filesystem.refresh_mountpoints() + + return (error, self.get_results()) + + # Identify the targeted filesystem and obtain the current state + def __load_filesystem(self): + if self.__has_filesystem_criteria(): + filesystem = self.__find_matching_filesytem() + else: + filesystem = self.__find_default_filesystem() + + # The filesystem must be mounted to obtain the current state (subvolumes, default, etc) + if not filesystem.is_mounted(): + if not self.__automount: + raise BtrfsModuleException( + "Target filesystem uuid=%s is not currently mounted and automount=False." + "Mount explicitly before module execution or pass automount=True" % filesystem.uuid) + elif self.module.check_mode: + # TODO is failing the module an appropriate outcome in this scenario? + raise BtrfsModuleException( + "Target filesystem uuid=%s is not currently mounted. 
Unable to validate the current" + "state while running with check_mode=True" % filesystem.uuid) + else: + self.__mount_subvolume_id_to_tempdir(filesystem, self.__BTRFS_ROOT_SUBVOLUME_ID) + filesystem.refresh() + self.__filesystem = filesystem + + def __has_filesystem_criteria(self): + return self.__filesystem_uuid is not None or self.__filesystem_label is not None or self.__filesystem_device is not None + + def __find_matching_filesytem(self): + criteria = { + 'uuid': self.__filesystem_uuid, + 'label': self.__filesystem_label, + 'device': self.__filesystem_device, + } + return self.__provider.get_matching_filesystem(criteria) + + def __find_default_filesystem(self): + filesystems = self.__provider.get_filesystems() + filesystem = None + + if len(filesystems) == 1: + filesystem = filesystems[0] + else: + mounted_filesystems = [x for x in filesystems if x.is_mounted()] + if len(mounted_filesystems) == 1: + filesystem = mounted_filesystems[0] + + if filesystem is not None: + return filesystem + else: + raise BtrfsModuleException( + "Failed to automatically identify targeted filesystem. " + "No explicit device indicated and found %d available filesystems." 
% len(filesystems) + ) + + # Prepare unit of work + def __prepare_unit_of_work(self): + if self.__state == "present": + if self.__snapshot_source is None: + self.__prepare_subvolume_present() + else: + self.__prepare_snapshot_present() + + if self.__default: + self.__prepare_set_default() + elif self.__state == "absent": + self.__prepare_subvolume_absent() + + def __prepare_subvolume_present(self): + subvolume = self.__filesystem.get_subvolume_by_name(self.__name) + if subvolume is None: + self.__prepare_before_create_subvolume(self.__name) + self.__stage_create_subvolume(self.__name) + + def __prepare_before_create_subvolume(self, subvolume_name): + closest_parent = self.__filesystem.get_nearest_subvolume(subvolume_name) + self.__stage_required_mount(closest_parent) + if self.__recursive: + self.__prepare_create_intermediates(closest_parent, subvolume_name) + + def __prepare_create_intermediates(self, closest_subvolume, subvolume_name): + relative_path = closest_subvolume.get_child_relative_path(self.__name) + missing_subvolumes = [x for x in relative_path.split(os.path.sep) if len(x) > 0] + if len(missing_subvolumes) > 1: + current = closest_subvolume.path + for s in missing_subvolumes[:-1]: + separator = os.path.sep if current[-1] != os.path.sep else "" + current = current + separator + s + self.__stage_create_subvolume(current, True) + + def __prepare_snapshot_present(self): + source_subvolume = self.__filesystem.get_subvolume_by_name(self.__snapshot_source) + subvolume = self.__filesystem.get_subvolume_by_name(self.__name) + subvolume_exists = subvolume is not None + + if subvolume_exists: + if self.__snapshot_conflict == "skip": + # No change required + return + elif self.__snapshot_conflict == "error": + raise BtrfsModuleException("Target subvolume=%s already exists and snapshot_conflict='error'" % self.__name) + + if source_subvolume is None: + raise BtrfsModuleException("Source subvolume %s does not exist" % self.__snapshot_source) + elif subvolume is not 
None and source_subvolume.id == subvolume.id: + raise BtrfsModuleException("Snapshot source and target are the same.") + else: + self.__stage_required_mount(source_subvolume) + + if subvolume_exists and self.__snapshot_conflict == "clobber": + self.__prepare_delete_subvolume_tree(subvolume) + elif not subvolume_exists: + self.__prepare_before_create_subvolume(self.__name) + + self.__stage_create_snapshot(source_subvolume, self.__name) + + def __prepare_subvolume_absent(self): + subvolume = self.__filesystem.get_subvolume_by_name(self.__name) + if subvolume is not None: + self.__prepare_delete_subvolume_tree(subvolume) + + def __prepare_delete_subvolume_tree(self, subvolume): + if subvolume.is_filesystem_root(): + raise BtrfsModuleException("Can not delete the filesystem's root subvolume") + if not self.__recursive and len(subvolume.get_child_subvolumes()) > 0: + raise BtrfsModuleException("Subvolume targeted for deletion %s has children and recursive=False." + "Either explicitly delete the child subvolumes first or pass " + "parameter recursive=True." % subvolume.path) + + self.__stage_required_mount(subvolume.get_parent_subvolume()) + queue = self.__prepare_recursive_delete_order(subvolume) if self.__recursive else [subvolume] + # prepare unit of work + for s in queue: + if s.is_mounted(): + # TODO potentially unmount the subvolume if automount=True ? 
+ raise BtrfsModuleException("Can not delete mounted subvolume=%s" % s.path) + if s.is_filesystem_default(): + self.__stage_set_default_subvolume(self.__BTRFS_ROOT_SUBVOLUME, self.__BTRFS_ROOT_SUBVOLUME_ID) + self.__stage_delete_subvolume(s) + + def __prepare_recursive_delete_order(self, subvolume): + """Return the subvolume and all descendents as a list, ordered so that descendents always occur before their ancestors""" + pending = [subvolume] + ordered = [] + while len(pending) > 0: + next = pending.pop() + ordered.append(next) + pending.extend(next.get_child_subvolumes()) + ordered.reverse() # reverse to ensure children are deleted before their parent + return ordered + + def __prepare_set_default(self): + subvolume = self.__filesystem.get_subvolume_by_name(self.__name) + subvolume_id = subvolume.id if subvolume is not None else None + + if self.__filesystem.default_subvolid != subvolume_id: + self.__stage_set_default_subvolume(self.__name, subvolume_id) + + # Stage operations to the unit of work + def __stage_required_mount(self, subvolume): + if subvolume.get_mounted_path() is None: + if self.__automount: + self.__required_mounts.append(subvolume) + else: + raise BtrfsModuleException("The requested changes will require the subvolume '%s' to be mounted, but automount=False" % subvolume.path) + + def __stage_create_subvolume(self, subvolume_path, intermediate=False): + """ + Add required creation of an intermediate subvolume to the unit of work + If intermediate is true, the action will be skipped if a directory like file is found at target + after mounting a parent subvolume + """ + self.__unit_of_work.append({ + 'action': self.__CREATE_SUBVOLUME_OPERATION, + 'target': subvolume_path, + 'intermediate': intermediate, + }) + + def __stage_create_snapshot(self, source_subvolume, target_subvolume_path): + """Add creation of a snapshot from source to target to the unit of work""" + self.__unit_of_work.append({ + 'action': self.__CREATE_SNAPSHOT_OPERATION, + 
'source': source_subvolume.path, + 'source_id': source_subvolume.id, + 'target': target_subvolume_path, + }) + + def __stage_delete_subvolume(self, subvolume): + """Add deletion of the target subvolume to the unit of work""" + self.__unit_of_work.append({ + 'action': self.__DELETE_SUBVOLUME_OPERATION, + 'target': subvolume.path, + 'target_id': subvolume.id, + }) + + def __stage_set_default_subvolume(self, subvolume_path, subvolume_id=None): + """Add update of the filesystem's default subvolume to the unit of work""" + self.__unit_of_work.append({ + 'action': self.__SET_DEFAULT_SUBVOLUME_OPERATION, + 'target': subvolume_path, + 'target_id': subvolume_id, + }) + + # Execute the unit of work + def __execute_unit_of_work(self): + self.__check_required_mounts() + for op in self.__unit_of_work: + if op['action'] == self.__CREATE_SUBVOLUME_OPERATION: + self.__execute_create_subvolume(op) + elif op['action'] == self.__CREATE_SNAPSHOT_OPERATION: + self.__execute_create_snapshot(op) + elif op['action'] == self.__DELETE_SUBVOLUME_OPERATION: + self.__execute_delete_subvolume(op) + elif op['action'] == self.__SET_DEFAULT_SUBVOLUME_OPERATION: + self.__execute_set_default_subvolume(op) + else: + raise ValueError("Unknown operation type '%s'" % op['action']) + + def __execute_create_subvolume(self, operation): + target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target']) + if not self.__is_existing_directory_like(target_mounted_path): + self.__btrfs_api.subvolume_create(target_mounted_path) + self.__completed_work.append(operation) + + def __execute_create_snapshot(self, operation): + source_subvolume = self.__filesystem.get_subvolume_by_name(operation['source']) + source_mounted_path = source_subvolume.get_mounted_path() + target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target']) + + self.__btrfs_api.subvolume_snapshot(source_mounted_path, target_mounted_path) + self.__completed_work.append(operation) + + def 
__execute_delete_subvolume(self, operation): + target_mounted_path = self.__filesystem.get_mountpath_as_child(operation['target']) + self.__btrfs_api.subvolume_delete(target_mounted_path) + self.__completed_work.append(operation) + + def __execute_set_default_subvolume(self, operation): + target = operation['target'] + target_id = operation['target_id'] + + if target_id is None: + target_subvolume = self.__filesystem.get_subvolume_by_name(target) + + if target_subvolume is None: + self.__filesystem.refresh() # the target may have been created earlier in module execution + target_subvolume = self.__filesystem.get_subvolume_by_name(target) + + if target_subvolume is None: + raise BtrfsModuleException("Failed to find existing subvolume '%s'" % target) + else: + target_id = target_subvolume.id + + self.__btrfs_api.subvolume_set_default(self.__filesystem.get_any_mountpoint(), target_id) + self.__completed_work.append(operation) + + def __is_existing_directory_like(self, path): + return os.path.exists(path) and ( + os.path.isdir(path) or + os.stat(path).st_ino == self.__BTRFS_SUBVOLUME_INODE_NUMBER + ) + + def __check_required_mounts(self): + filtered = self.__filter_child_subvolumes(self.__required_mounts) + if len(filtered) > 0: + for subvolume in filtered: + self.__mount_subvolume_id_to_tempdir(self.__filesystem, subvolume.id) + self.__filesystem.refresh_mountpoints() + + def __filter_child_subvolumes(self, subvolumes): + """Filter the provided list of subvolumes to remove any that are a child of another item in the list""" + filtered = [] + last = None + ordered = sorted(subvolumes, key=lambda x: x.path) + for next in ordered: + if last is None or not next.path[0:len(last)] == last: + filtered.append(next) + last = next.path + return filtered + + # Create/cleanup temporary mountpoints + def __mount_subvolume_id_to_tempdir(self, filesystem, subvolid): + # this check should be redundant + if self.module.check_mode or not self.__automount: + raise 
BtrfsModuleException("Unable to temporarily mount required subvolumes" + "with automount=%s and check_mode=%s" % (self.__automount, self.module.check_mode)) + + cache_key = "%s:%d" % (filesystem.uuid, subvolid) + # The subvolume was already mounted, so return the current path + if cache_key in self.__temporary_mounts: + return self.__temporary_mounts[cache_key] + + device = filesystem.devices[0] + mountpoint = tempfile.mkdtemp(dir="/tmp") + self.__temporary_mounts[cache_key] = mountpoint + + mount = self.module.get_bin_path("mount", required=True) + command = [mount, "-o", "noatime,subvolid=%d" % subvolid, device, mountpoint] + result = self.module.run_command(command, check_rc=True) + + return mountpoint + + def __cleanup_mounts(self): + for key in self.__temporary_mounts.keys(): + self.__cleanup_mount(self.__temporary_mounts[key]) + + def __cleanup_mount(self, mountpoint): + umount = self.module.get_bin_path("umount", required=True) + result = self.module.run_command([umount, mountpoint]) + if result[0] == 0: + rmdir = self.module.get_bin_path("rmdir", required=True) + self.module.run_command([rmdir, mountpoint]) + + # Format and return results + def get_results(self): + target = self.__filesystem.get_subvolume_by_name(self.__name) + return dict( + changed=len(self.__completed_work) > 0, + filesystem=self.__filesystem.get_summary(), + modifications=self.__get_formatted_modifications(), + target_subvolume_id=(target.id if target is not None else None) + ) + + def __get_formatted_modifications(self): + return [self.__format_operation_result(op) for op in self.__completed_work] + + def __format_operation_result(self, operation): + action_type = operation['action'] + if action_type == self.__CREATE_SUBVOLUME_OPERATION: + return self.__format_create_subvolume_result(operation) + elif action_type == self.__CREATE_SNAPSHOT_OPERATION: + return self.__format_create_snapshot_result(operation) + elif action_type == self.__DELETE_SUBVOLUME_OPERATION: + return 
self.__format_delete_subvolume_result(operation) + elif action_type == self.__SET_DEFAULT_SUBVOLUME_OPERATION: + return self.__format_set_default_subvolume_result(operation) + else: + raise ValueError("Unknown operation type '%s'" % operation['action']) + + def __format_create_subvolume_result(self, operation): + target = operation['target'] + target_subvolume = self.__filesystem.get_subvolume_by_name(target) + target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID + return "Created subvolume '%s' (%s)" % (target, target_id) + + def __format_create_snapshot_result(self, operation): + source = operation['source'] + source_id = operation['source_id'] + + target = operation['target'] + target_subvolume = self.__filesystem.get_subvolume_by_name(target) + target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID + return "Created snapshot '%s' (%s) from '%s' (%s)" % (target, target_id, source, source_id) + + def __format_delete_subvolume_result(self, operation): + target = operation['target'] + target_id = operation['target_id'] + return "Deleted subvolume '%s' (%s)" % (target, target_id) + + def __format_set_default_subvolume_result(self, operation): + target = operation['target'] + if 'target_id' in operation: + target_id = operation['target_id'] + else: + target_subvolume = self.__filesystem.get_subvolume_by_name(target) + target_id = target_subvolume.id if target_subvolume is not None else self.__UNKNOWN_SUBVOLUME_ID + return "Updated default subvolume to '%s' (%s)" % (target, target_id) + + +def run_module(): + module_args = dict( + automount=dict(type='bool', default=False), + default=dict(type='bool', default=False), + filesystem_device=dict(type='path'), + filesystem_label=dict(type='str'), + filesystem_uuid=dict(type='str'), + name=dict(type='str', required=True), + recursive=dict(type='bool', default=False), + state=dict(type='str', default='present', choices=['present', 
'absent']), + snapshot_source=dict(type='str'), + snapshot_conflict=dict(type='str', default='skip', choices=['skip', 'clobber', 'error']) + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True + ) + + subvolume = BtrfsSubvolumeModule(module) + error, result = subvolume.run() + if error is not None: + module.fail_json(str(error), **result) + else: + module.exit_json(**result) + + +def main(): + run_module() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/bundler.py b/plugins/modules/bundler.py index 5811fa720c..2395cda332 100644 --- a/plugins/modules/bundler.py +++ b/plugins/modules/bundler.py @@ -1,98 +1,94 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2015, Tim Hoiberg # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: bundler short_description: Manage Ruby Gem dependencies with Bundler description: - - Manage installation and Gem version dependencies for Ruby using the Bundler gem + - Manage installation and Gem version dependencies for Ruby using the Bundler gem. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: executable: type: str description: - - The path to the bundler executable + - The path to the bundler executable. state: type: str description: - - The desired state of the Gem bundle. C(latest) updates gems to the most recent, acceptable version + - The desired state of the Gem bundle. V(latest) updates gems to the most recent, acceptable version. choices: [present, latest] default: present chdir: type: path description: - - The directory to execute the bundler commands from. 
This directory - needs to contain a valid Gemfile or .bundle/ directory - - If not specified, it will default to the temporary working directory + - The directory to execute the bundler commands from. This directory needs to contain a valid Gemfile or .bundle/ directory. + - If not specified, it defaults to the temporary working directory. exclude_groups: type: list elements: str description: - - A list of Gemfile groups to exclude during operations. This only - applies when state is C(present). Bundler considers this - a 'remembered' property for the Gemfile and will automatically exclude - groups in future operations even if C(exclude_groups) is not set + - A list of Gemfile groups to exclude during operations. This only applies when O(state=present). Bundler considers + this a 'remembered' property for the Gemfile and automatically excludes groups in future operations even if O(exclude_groups) + is not set. clean: description: - - Only applies if state is C(present). If set removes any gems on the - target host that are not in the gemfile + - Only applies if O(state=present). If set removes any gems on the target host that are not in the gemfile. type: bool default: false gemfile: type: path description: - - Only applies if state is C(present). The path to the gemfile to use to install gems. - - If not specified it will default to the Gemfile in current directory + - Only applies if O(state=present). The path to the gemfile to use to install gems. + - If not specified it defaults to the Gemfile in current directory. local: description: - - If set only installs gems from the cache on the target host + - If set only installs gems from the cache on the target host. type: bool default: false deployment_mode: description: - - Only applies if state is C(present). If set it will install gems in - ./vendor/bundle instead of the default location. Requires a Gemfile.lock - file to have been created prior + - Only applies if O(state=present). 
If set it installs gems in C(./vendor/bundle) instead of the default location. Requires + a C(Gemfile.lock) file to have been created prior. type: bool default: false user_install: description: - - Only applies if state is C(present). Installs gems in the local user's cache or for all users + - Only applies if O(state=present). Installs gems in the local user's cache or for all users. type: bool default: true gem_path: type: path description: - - Only applies if state is C(present). Specifies the directory to - install the gems into. If C(chdir) is set then this path is relative to - C(chdir) - - If not specified the default RubyGems gem paths will be used. + - Only applies if O(state=present). Specifies the directory to install the gems into. If O(chdir) is set then this path + is relative to O(chdir). + - If not specified the default RubyGems gem paths are used. binstub_directory: type: path description: - - Only applies if state is C(present). Specifies the directory to - install any gem bins files to. When executed the bin files will run - within the context of the Gemfile and fail if any required gem - dependencies are not installed. If C(chdir) is set then this path is - relative to C(chdir) + - Only applies if O(state=present). Specifies the directory to install any gem bins files to. When executed the bin + files run within the context of the Gemfile and fail if any required gem dependencies are not installed. If O(chdir) + is set then this path is relative to O(chdir). extra_args: type: str description: - - A space separated string of additional commands that can be applied to - the Bundler command. Refer to the Bundler documentation for more - information + - A space separated string of additional commands that can be applied to the Bundler command. Refer to the Bundler documentation + for more information. 
author: "Tim Hoiberg (@thoiberg)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install gems from a Gemfile in the current directory community.general.bundler: state: present @@ -117,7 +113,7 @@ EXAMPLES = ''' community.general.bundler: state: latest chdir: ~/rails_project -''' +""" from ansible.module_utils.basic import AnsibleModule @@ -133,18 +129,18 @@ def get_bundler_executable(module): def main(): module = AnsibleModule( argument_spec=dict( - executable=dict(default=None, required=False), - state=dict(default='present', required=False, choices=['present', 'latest']), - chdir=dict(default=None, required=False, type='path'), - exclude_groups=dict(default=None, required=False, type='list', elements='str'), - clean=dict(default=False, required=False, type='bool'), - gemfile=dict(default=None, required=False, type='path'), - local=dict(default=False, required=False, type='bool'), - deployment_mode=dict(default=False, required=False, type='bool'), - user_install=dict(default=True, required=False, type='bool'), - gem_path=dict(default=None, required=False, type='path'), - binstub_directory=dict(default=None, required=False, type='path'), - extra_args=dict(default=None, required=False), + executable=dict(), + state=dict(default='present', choices=['present', 'latest']), + chdir=dict(type='path'), + exclude_groups=dict(type='list', elements='str'), + clean=dict(default=False, type='bool'), + gemfile=dict(type='path'), + local=dict(default=False, type='bool'), + deployment_mode=dict(default=False, type='bool'), + user_install=dict(default=True, type='bool'), + gem_path=dict(type='path'), + binstub_directory=dict(type='path'), + extra_args=dict(), ), supports_check_mode=True ) diff --git a/plugins/modules/bzr.py b/plugins/modules/bzr.py index 7832183806..3493b9476d 100644 --- a/plugins/modules/bzr.py +++ b/plugins/modules/bzr.py @@ -1,61 +1,61 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2013, André Paramés # Based on the Git module by Michael 
DeHaan # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: bzr author: -- André Paramés (@andreparames) + - André Paramés (@andreparames) short_description: Deploy software (or files) from bzr branches description: - - Manage I(bzr) branches to deploy files or software. + - Manage C(bzr) branches to deploy files or software. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - name: - description: - - SSH or HTTP protocol address of the parent branch. - aliases: [ parent ] - required: true - type: str - dest: - description: - - Absolute path of where the branch should be cloned to. - required: true - type: path - version: - description: - - What version of the branch to clone. This can be the - bzr revno or revid. - default: head - type: str - force: - description: - - If C(true), any modified files in the working - tree will be discarded. Before 1.9 the default - value was C(true). - type: bool - default: false - executable: - description: - - Path to bzr executable to use. If not supplied, - the normal mechanism for resolving binary paths will be used. - type: str -''' + name: + description: + - SSH or HTTP protocol address of the parent branch. + aliases: [parent] + required: true + type: str + dest: + description: + - Absolute path of where the branch should be cloned to. + required: true + type: path + version: + description: + - What version of the branch to clone. This can be the bzr revno or revid. + default: head + type: str + force: + description: + - If V(true), any modified files in the working tree is discarded. 
+ type: bool + default: false + executable: + description: + - Path to C(bzr) executable to use. If not supplied, the normal mechanism for resolving binary paths is used. + type: str +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Checkout community.general.bzr: name: bzr+ssh://foosball.example.org/path/to/branch dest: /srv/checkout version: 22 -''' +""" import os import re @@ -78,7 +78,7 @@ class Bzr(object): def get_version(self): '''samples the version of the bzr branch''' - cmd = "%s revno" % self.bzr_path + cmd = [self.bzr_path, "revno"] rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest) revno = stdout.strip() return revno @@ -98,11 +98,12 @@ class Bzr(object): def has_local_mods(self): - cmd = "%s status -S" % self.bzr_path + cmd = [self.bzr_path, "status", "-S"] rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest) lines = stdout.splitlines() + mods_re = re.compile('^\\?\\?.*$') - lines = filter(lambda c: not re.search('^\\?\\?.*$', c), lines) + lines = [c for c in lines if not mods_re.search(c)] return len(lines) > 0 def reset(self, force): diff --git a/plugins/modules/campfire.py b/plugins/modules/campfire.py index dfc9af1ce1..c1da278634 100644 --- a/plugins/modules/campfire.py +++ b/plugins/modules/campfire.py @@ -1,21 +1,25 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: campfire short_description: Send a message to Campfire description: - - Send a message to Campfire. - - Messages with newlines will result in a "Paste" message being sent. + - Send a message to Campfire. + - Messages with newlines result in a "Paste" message being sent. 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: subscription: type: str @@ -42,22 +46,58 @@ options: description: - Send a notification sound before the message. required: false - choices: ["56k", "bell", "bezos", "bueller", "clowntown", - "cottoneyejoe", "crickets", "dadgummit", "dangerzone", - "danielsan", "deeper", "drama", "greatjob", "greyjoy", - "guarantee", "heygirl", "horn", "horror", - "inconceivable", "live", "loggins", "makeitso", "noooo", - "nyan", "ohmy", "ohyeah", "pushit", "rimshot", - "rollout", "rumble", "sax", "secret", "sexyback", - "story", "tada", "tmyk", "trololo", "trombone", "unix", - "vuvuzela", "what", "whoomp", "yeah", "yodel"] + choices: + - 56k + - bell + - bezos + - bueller + - clowntown + - cottoneyejoe + - crickets + - dadgummit + - dangerzone + - danielsan + - deeper + - drama + - greatjob + - greyjoy + - guarantee + - heygirl + - horn + - horror + - inconceivable + - live + - loggins + - makeitso + - noooo + - nyan + - ohmy + - ohyeah + - pushit + - rimshot + - rollout + - rumble + - sax + - secret + - sexyback + - story + - tada + - tmyk + - trololo + - trombone + - unix + - vuvuzela + - what + - whoomp + - yeah + - yodel # informational: requirements for nodes -requirements: [ ] +requirements: [] author: "Adam Garside (@fabulops)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Send a message to Campfire community.general.campfire: subscription: foo @@ -72,7 +112,7 @@ EXAMPLES = ''' room: 123 notify: loggins msg: Task completed ... with feeling. 
-''' +""" try: from html import escape as html_escape @@ -95,8 +135,7 @@ def main(): token=dict(required=True, no_log=True), room=dict(required=True), msg=dict(required=True), - notify=dict(required=False, - choices=["56k", "bell", "bezos", "bueller", + notify=dict(choices=["56k", "bell", "bezos", "bueller", "clowntown", "cottoneyejoe", "crickets", "dadgummit", "dangerzone", "danielsan", "deeper", "drama", diff --git a/plugins/modules/capabilities.py b/plugins/modules/capabilities.py index 9309958eca..64df086d67 100644 --- a/plugins/modules/capabilities.py +++ b/plugins/modules/capabilities.py @@ -1,48 +1,52 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2014, Nate Coraor # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: capabilities short_description: Manage Linux capabilities description: - - This module manipulates files privileges using the Linux capabilities(7) system. + - This module manipulates files privileges using the Linux capabilities(7) system. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - path: - description: - - Specifies the path to the file to be managed. - type: str - required: true - aliases: [ key ] - capability: - description: - - Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent)) - type: str - required: true - aliases: [ cap ] - state: - description: - - Whether the entry should be present or absent in the file's capabilities. - type: str - choices: [ absent, present ] - default: present + path: + description: + - Specifies the path to the file to be managed. 
+ type: str + required: true + aliases: [key] + capability: + description: + - Desired capability to set (with operator and flags, if O(state=present)) or remove (if O(state=absent)). + type: str + required: true + aliases: [cap] + state: + description: + - Whether the entry should be present or absent in the file's capabilities. + type: str + choices: [absent, present] + default: present notes: - - The capabilities system will automatically transform operators and flags into the effective set, - so for example, C(cap_foo=ep) will probably become C(cap_foo+ep). - - This module does not attempt to determine the final operator and flags to compare, - so you will want to ensure that your capabilities argument matches the final capabilities. + - The capabilities system automatically transforms operators and flags into the effective set, so for example, C(cap_foo=ep) + probably becomes C(cap_foo+ep). + - This module does not attempt to determine the final operator and flags to compare, so you want to ensure that your capabilities + argument matches the final capabilities. 
author: -- Nate Coraor (@natefoo) -''' + - Nate Coraor (@natefoo) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Set cap_sys_chroot+ep on /foo community.general.capabilities: path: /foo @@ -54,7 +58,7 @@ EXAMPLES = r''' path: /bar capability: cap_net_bind_service state: absent -''' +""" from ansible.module_utils.basic import AnsibleModule @@ -86,8 +90,8 @@ class CapabilitiesModule(object): if self.module.check_mode: self.module.exit_json(changed=True, msg='capabilities changed') else: - # remove from current cap list if it's already set (but op/flags differ) - current = list(filter(lambda x: x[0] != self.capability_tup[0], current)) + # remove from current cap list if it is already set (but op/flags differ) + current = [x for x in current if x[0] != self.capability_tup[0]] # add new cap with correct op/flags current.append(self.capability_tup) self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current)) @@ -97,13 +101,13 @@ class CapabilitiesModule(object): self.module.exit_json(changed=True, msg='capabilities changed') else: # remove from current cap list and then set current list - current = filter(lambda x: x[0] != self.capability_tup[0], current) + current = [x for x in current if x[0] != self.capability_tup[0]] self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current)) self.module.exit_json(changed=False, state=self.state) def getcap(self, path): rval = [] - cmd = "%s -v %s" % (self.getcap_cmd, path) + cmd = [self.getcap_cmd, "-v", path] rc, stdout, stderr = self.module.run_command(cmd) # If file xattrs are set but no caps are set the output will be: # '/foo =' @@ -117,6 +121,8 @@ class CapabilitiesModule(object): if ' =' in stdout: # process output of an older version of libcap caps = stdout.split(' =')[1].strip().split() + elif stdout.strip().endswith(")"): # '/foo (Error Message)' + self.module.fail_json(msg="Unable to get capabilities 
of %s" % path, stdout=stdout.strip(), stderr=stderr) else: # otherwise, we have a newer version here # see original commit message of cap/v0.2.40-18-g177cd41 in libcap.git @@ -136,7 +142,7 @@ class CapabilitiesModule(object): def setcap(self, path, caps): caps = ' '.join([''.join(cap) for cap in caps]) - cmd = "%s '%s' %s" % (self.setcap_cmd, caps, path) + cmd = [self.setcap_cmd, caps, path] rc, stdout, stderr = self.module.run_command(cmd) if rc != 0: self.module.fail_json(msg="Unable to set capabilities of %s" % path, stdout=stdout, stderr=stderr) diff --git a/plugins/modules/cargo.py b/plugins/modules/cargo.py index 787edbd256..3ec0012ca0 100644 --- a/plugins/modules/cargo.py +++ b/plugins/modules/cargo.py @@ -1,23 +1,32 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2021 Radek Sprta +# Copyright (c) 2024 Colin Nolan # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - +from __future__ import annotations DOCUMENTATION = r""" ---- module: cargo short_description: Manage Rust packages with cargo version_added: 4.3.0 description: - Manage Rust packages with cargo. author: "Radek Sprta (@radek-sprta)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: + executable: + description: + - Path to the C(cargo) installed in the system. + - If not specified, the module looks for C(cargo) in E(PATH). + type: path + version_added: 7.5.0 name: description: - The name of a Rust package to install. @@ -25,27 +34,47 @@ options: elements: str required: true path: - description: - -> - The base path where to install the Rust packages. Cargo automatically appends - C(/bin). In other words, C(/usr/local) will become C(/usr/local/bin). 
+ description: The base path where to install the Rust packages. Cargo automatically appends V(/bin). In other words, V(/usr/local) + becomes V(/usr/local/bin). type: path version: - description: - -> - The version to install. If I(name) contains multiple values, the module will - try to install all of them in this version. + description: The version to install. If O(name) contains multiple values, the module tries to install all of them in this + version. type: str required: false + locked: + description: + - Install with locked dependencies. + - This is only used when installing packages. + required: false + type: bool + default: false + version_added: 7.5.0 state: description: - The state of the Rust package. required: false type: str default: present - choices: [ "present", "absent", "latest" ] + choices: ["present", "absent", "latest"] + directory: + description: + - Path to the source directory to install the Rust package from. + - This is only used when installing packages. + type: path + required: false + version_added: 9.1.0 + features: + description: + - List of features to activate. + - This is only used when installing packages. 
+ type: list + elements: str + required: false + default: [] + version_added: 11.0.0 requirements: - - cargo installed in bin path (recommended /usr/local/bin) + - cargo installed """ EXAMPLES = r""" @@ -53,6 +82,11 @@ EXAMPLES = r""" community.general.cargo: name: ludusavi +- name: Install "ludusavi" Rust package with locked dependencies + community.general.cargo: + name: ludusavi + locked: true + - name: Install "ludusavi" Rust package in version 0.10.0 community.general.cargo: name: ludusavi @@ -72,8 +106,20 @@ EXAMPLES = r""" community.general.cargo: name: ludusavi state: latest + +- name: Install "ludusavi" Rust package from source directory + community.general.cargo: + name: ludusavi + directory: /path/to/ludusavi/source + +- name: Install "serpl" Rust package with ast_grep feature + community.general.cargo: + name: serpl + features: + - ast_grep """ +import json import os import re @@ -83,12 +129,14 @@ from ansible.module_utils.basic import AnsibleModule class Cargo(object): def __init__(self, module, **kwargs): self.module = module + self.executable = [kwargs["executable"] or module.get_bin_path("cargo", True)] self.name = kwargs["name"] self.path = kwargs["path"] self.state = kwargs["state"] self.version = kwargs["version"] - - self.executable = [module.get_bin_path("cargo", True)] + self.locked = kwargs["locked"] + self.directory = kwargs["directory"] + self.features = kwargs["features"] @property def path(self): @@ -111,9 +159,13 @@ class Cargo(object): def get_installed(self): cmd = ["install", "--list"] + if self.path: + cmd.append("--root") + cmd.append(self.path) + data, dummy = self._exec(cmd, True, False, False) - package_regex = re.compile(r"^([\w\-]+) v(.+):$") + package_regex = re.compile(r"^([\w\-]+) v(\S+).*:$") installed = {} for line in data.splitlines(): package_info = package_regex.match(line) @@ -125,25 +177,63 @@ class Cargo(object): def install(self, packages=None): cmd = ["install"] cmd.extend(packages or self.name) + if self.locked: + 
cmd.append("--locked") if self.path: cmd.append("--root") cmd.append(self.path) if self.version: cmd.append("--version") cmd.append(self.version) + if self.directory: + cmd.append("--path") + cmd.append(self.directory) + if self.features: + cmd += ["--features", ",".join(self.features)] return self._exec(cmd) def is_outdated(self, name): installed_version = self.get_installed().get(name) + latest_version = ( + self.get_latest_published_version(name) + if not self.directory + else self.get_source_directory_version(name) + ) + return installed_version != latest_version + def get_latest_published_version(self, name): cmd = ["search", name, "--limit", "1"] data, dummy = self._exec(cmd, True, False, False) match = re.search(r'"(.+)"', data) - if match: - latest_version = match.group(1) + if not match: + self.module.fail_json( + msg="No published version for package %s found" % name + ) + return match.group(1) - return installed_version != latest_version + def get_source_directory_version(self, name): + cmd = [ + "metadata", + "--format-version", + "1", + "--no-deps", + "--manifest-path", + os.path.join(self.directory, "Cargo.toml"), + ] + data, dummy = self._exec(cmd, True, False, False) + manifest = json.loads(data) + + package = next( + (package for package in manifest["packages"] if package["name"] == name), + None, + ) + if not package: + self.module.fail_json( + msg="Package %s not defined in source, found: %s" + % (name, [x["name"] for x in manifest["packages"]]) + ) + return package["version"] def uninstall(self, packages=None): cmd = ["uninstall"] @@ -153,27 +243,34 @@ class Cargo(object): def main(): arg_spec = dict( + executable=dict(type="path"), name=dict(required=True, type="list", elements="str"), - path=dict(default=None, type="path"), + path=dict(type="path"), state=dict(default="present", choices=["present", "absent", "latest"]), - version=dict(default=None, type="str"), + version=dict(type="str"), + locked=dict(default=False, type="bool"), + 
directory=dict(type="path"), + features=dict(default=[], type="list", elements="str"), ) module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) name = module.params["name"] - path = module.params["path"] state = module.params["state"] version = module.params["version"] + directory = module.params["directory"] if not name: module.fail_json(msg="Package name must be specified") + if directory is not None and not os.path.isdir(directory): + module.fail_json(msg="Source directory does not exist") + # Set LANG env since we parse stdout module.run_command_environ_update = dict( LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C" ) - cargo = Cargo(module, name=name, path=path, state=state, version=version) + cargo = Cargo(module, **module.params) changed, out, err = False, None, None installed_packages = cargo.get_installed() if state == "present": diff --git a/plugins/modules/catapult.py b/plugins/modules/catapult.py index 8b8c72974d..053eb4b51b 100644 --- a/plugins/modules/catapult.py +++ b/plugins/modules/catapult.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2016, Jonathan Mainguy # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -7,27 +6,38 @@ # # basis of code taken from the ansible twillio and nexmo modules -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: catapult -short_description: Send a sms / mms using the catapult bandwidth api +short_description: Send a sms / mms using the catapult bandwidth API description: - - Allows notifications to be sent using sms / mms via the catapult bandwidth api. + - Allows notifications to be sent using SMS / MMS using the catapult bandwidth API. +deprecated: + removed_in: 13.0.0 + why: >- + DNS fails to resolve the API endpoint used by the module since Oct 2024. 
+ See L(the associated issue, https://github.com/ansible-collections/community.general/issues/10318) for details. + alternative: There is none. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: src: type: str description: - - One of your catapult telephone numbers the message should come from (must be in E.164 format, like C(+19195551212)). + - One of your catapult telephone numbers the message should come from (must be in E.164 format, like V(+19195551212)). required: true dest: type: list elements: str description: - - The phone number or numbers the message should be sent to (must be in E.164 format, like C(+19195551212)). + - The phone number or numbers the message should be sent to (must be in E.164 format, like V(+19195551212)). required: true msg: type: str @@ -37,31 +47,30 @@ options: media: type: str description: - - For MMS messages, a media url to the location of the media to be sent with the message. + - For MMS messages, a media URL to the location of the media to be sent with the message. user_id: type: str description: - - User Id from Api account page. + - User ID from API account page. required: true api_token: type: str description: - - Api Token from Api account page. + - API Token from API account page. required: true api_secret: type: str description: - - Api Secret from Api account page. + - API Secret from API account page. required: true author: "Jonathan Mainguy (@Jmainguy)" notes: - - Will return changed even if the media url is wrong. - - Will return changed if the destination number is invalid. + - Will return changed even if the media URL is wrong. + - Will return changed if the destination number is invalid. 
+""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Send a mms to multiple users community.general.catapult: src: "+15035555555" @@ -82,16 +91,7 @@ EXAMPLES = ''' user_id: "{{ user_id }}" api_token: "{{ api_token }}" api_secret: "{{ api_secret }}" - -''' - -RETURN = ''' -changed: - description: Whether the api accepted the message. - returned: always - type: bool - sample: true -''' +""" import json @@ -128,7 +128,7 @@ def main(): user_id=dict(required=True), api_token=dict(required=True, no_log=True), api_secret=dict(required=True, no_log=True), - media=dict(default=None, required=False), + media=dict(), ), ) diff --git a/plugins/modules/circonus_annotation.py b/plugins/modules/circonus_annotation.py index 661c854e6d..4d00b6fb98 100644 --- a/plugins/modules/circonus_annotation.py +++ b/plugins/modules/circonus_annotation.py @@ -1,63 +1,65 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (c) 2014-2015, Epic Games, Inc. # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: circonus_annotation -short_description: Create an annotation in circonus +short_description: Create an annotation in Circonus description: - - Create an annotation event with a given category, title and description. Optionally start, end or durations can be provided + - Create an annotation event with a given category, title and description. Optionally start, end or durations can be provided. author: "Nick Harring (@NickatEpic)" requirements: - - requests (either >= 2.0.0 for Python 3, or >= 1.0.0 for Python 2) -notes: - - Check mode isn't supported. 
+ - requests >= 2.0.0 +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - api_key: - type: str - description: - - Circonus API key - required: true - category: - type: str - description: - - Annotation Category - required: true + api_key: + type: str description: - type: str - description: - - Description of annotation - required: true - title: - type: str - description: - - Title of annotation - required: true - start: - type: int - description: - - Unix timestamp of event start - - If not specified, it defaults to I(now). - stop: - type: int - description: - - Unix timestamp of event end - - If not specified, it defaults to I(now) + I(duration). - duration: - type: int - description: - - Duration in seconds of annotation - default: 0 -''' -EXAMPLES = ''' + - Circonus API key. + required: true + category: + type: str + description: + - Annotation Category. + required: true + description: + type: str + description: + - Description of annotation. + required: true + title: + type: str + description: + - Title of annotation. + required: true + start: + type: int + description: + - Unix timestamp of event start. + - If not specified, it defaults to "now". + stop: + type: int + description: + - Unix timestamp of event end. + - If not specified, it defaults to "now" + O(duration). + duration: + type: int + description: + - Duration in seconds of annotation. 
+ default: 0 +""" +EXAMPLES = r""" - name: Create a simple annotation event with a source, defaults to start and end time of now community.general.circonus_annotation: api_key: XXXXXXXXXXXXXXXXX @@ -81,66 +83,67 @@ EXAMPLES = ''' category: This category groups like annotations start_time: 1395940006 end_time: 1395954407 -''' +""" -RETURN = ''' +RETURN = r""" annotation: - description: details about the created annotation - returned: success - type: complex - contains: - _cid: - description: annotation identifier - returned: success - type: str - sample: /annotation/100000 - _created: - description: creation timestamp - returned: success - type: int - sample: 1502236928 - _last_modified: - description: last modification timestamp - returned: success - type: int - sample: 1502236928 - _last_modified_by: - description: last modified by - returned: success - type: str - sample: /user/1000 - category: - description: category of the created annotation - returned: success - type: str - sample: alerts - title: - description: title of the created annotation - returned: success - type: str - sample: WARNING - description: - description: description of the created annotation - returned: success - type: str - sample: Host is down. - start: - description: timestamp, since annotation applies - returned: success - type: int - sample: Host is down. - stop: - description: timestamp, since annotation ends - returned: success - type: str - sample: Host is down. - rel_metrics: - description: Array of metrics related to this annotation, each metrics is a string. - returned: success - type: list - sample: - - 54321_kbps -''' + description: Details about the created annotation. + returned: success + type: complex + contains: + _cid: + description: Annotation identifier. + returned: success + type: str + sample: /annotation/100000 + _created: + description: Creation timestamp. 
+ returned: success + type: int + sample: 1502236928 + _last_modified: + description: Last modification timestamp. + returned: success + type: int + sample: 1502236928 + _last_modified_by: + description: Last modified by. + returned: success + type: str + sample: /user/1000 + category: + description: Category of the created annotation. + returned: success + type: str + sample: alerts + title: + description: Title of the created annotation. + returned: success + type: str + sample: WARNING + description: + description: Description of the created annotation. + returned: success + type: str + sample: Host is down. + start: + description: Timestamp, since annotation applies. + returned: success + type: int + sample: Host is down. + stop: + description: Timestamp, since annotation ends. + returned: success + type: str + sample: Host is down. + rel_metrics: + description: Array of metrics related to this annotation, each metrics is a string. + returned: success + type: list + sample: + - 54321_kbps +""" + import json import time import traceback @@ -156,7 +159,6 @@ except ImportError: HAS_REQUESTS = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.six import PY3 from ansible.module_utils.common.text.converters import to_native @@ -165,7 +167,7 @@ def check_requests_dep(module): if not HAS_REQUESTS: module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) else: - required_version = '2.0.0' if PY3 else '1.0.0' + required_version = '2.0.0' if LooseVersion(requests.__version__) < LooseVersion(required_version): module.fail_json(msg="'requests' library version should be >= %s, found: %s." 
% (required_version, requests.__version__)) diff --git a/plugins/modules/cisco_webex.py b/plugins/modules/cisco_webex.py index 95fcccb7d6..bd9c148b53 100644 --- a/plugins/modules/cisco_webex.py +++ b/plugins/modules/cisco_webex.py @@ -1,44 +1,48 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: cisco_webex short_description: Send a message to a Cisco Webex Teams Room or Individual description: - - Send a message to a Cisco Webex Teams Room or Individual with options to control the formatting. + - Send a message to a Cisco Webex Teams Room or Individual with options to control the formatting. author: Drew Rusell (@drew-russell) notes: - - The C(recipient_id) type must be valid for the supplied C(recipient_id). + - The O(recipient_type) must be valid for the supplied O(recipient_id). - Full API documentation can be found at U(https://developer.webex.com/docs/api/basics). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: recipient_type: description: - - The request parameter you would like to send the message to. - - Messages can be sent to either a room or individual (by ID or E-Mail). + - The request parameter you would like to send the message to. + - Messages can be sent to either a room or individual (by ID or E-Mail). required: true choices: ['roomId', 'toPersonEmail', 'toPersonId'] type: str recipient_id: description: - - The unique identifier associated with the supplied C(recipient_type). + - The unique identifier associated with the supplied O(recipient_type). 
required: true type: str msg_type: description: - - Specifies how you would like the message formatted. + - Specifies how you would like the message formatted. default: text choices: ['text', 'markdown'] type: str @@ -56,9 +60,9 @@ options: - The message you would like to send. required: true type: str -''' +""" -EXAMPLES = """ +EXAMPLES = r""" # Note: The following examples assume a variable file has been imported # that contains the appropriate information. @@ -93,10 +97,9 @@ EXAMPLES = """ msg_type: text personal_token: "{{ token }}" msg: "Cisco Webex Teams Ansible Module - Text Message to Individual by E-Mail" - """ -RETURN = """ +RETURN = r""" status_code: description: - The Response Code returned by the Webex Teams API. @@ -106,12 +109,12 @@ status_code: sample: 200 message: - description: - - The Response Message returned by the Webex Teams API. - - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics). - returned: always - type: str - sample: OK (585 bytes) + description: + - The Response Message returned by the Webex Teams API. + - Full Response Code explanations can be found at U(https://developer.webex.com/docs/api/basics). 
+ returned: always + type: str + sample: OK (585 bytes) """ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url @@ -172,7 +175,7 @@ def main(): argument_spec=dict( recipient_type=dict(required=True, choices=['roomId', 'toPersonEmail', 'toPersonId']), recipient_id=dict(required=True, no_log=True), - msg_type=dict(required=False, default='text', aliases=['message_type'], choices=['text', 'markdown']), + msg_type=dict(default='text', aliases=['message_type'], choices=['text', 'markdown']), personal_token=dict(required=True, no_log=True, aliases=['token']), msg=dict(required=True), ), diff --git a/plugins/modules/clc_aa_policy.py b/plugins/modules/clc_aa_policy.py deleted file mode 100644 index d1fba2429a..0000000000 --- a/plugins/modules/clc_aa_policy.py +++ /dev/null @@ -1,346 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_aa_policy -short_description: Create or Delete Anti Affinity Policies at CenturyLink Cloud -description: - - An Ansible module to Create or Delete Anti Affinity Policies at CenturyLink Cloud. -options: - name: - description: - - The name of the Anti Affinity Policy. - type: str - required: true - location: - description: - - Datacenter in which the policy lives/should live. - type: str - required: true - state: - description: - - Whether to create or delete the policy. 
- type: str - required: false - default: present - choices: ['present','absent'] -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. -''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - ---- -- name: Create AA Policy - hosts: localhost - gather_facts: false - connection: local - tasks: - - name: Create an Anti Affinity Policy - community.general.clc_aa_policy: - name: Hammer Time - location: UK3 - state: present - register: policy - - - name: Debug - ansible.builtin.debug: - var: policy - -- name: Delete AA Policy - hosts: localhost - gather_facts: false - connection: local - tasks: - - name: Delete an Anti Affinity Policy - community.general.clc_aa_policy: - name: Hammer Time - location: UK3 - state: absent - register: policy - - - name: Debug - ansible.builtin.debug: - var: policy -''' - -RETURN = ''' -policy: - description: The anti affinity policy information - returned: success - type: dict - sample: - { - "id":"1a28dd0988984d87b9cd61fa8da15424", - "name":"test_aa_policy", - "location":"UC1", - "links":[ - { - "rel":"self", - 
"href":"/v2/antiAffinityPolicies/wfad/1a28dd0988984d87b9cd61fa8da15424", - "verbs":[ - "GET", - "DELETE", - "PUT" - ] - }, - { - "rel":"location", - "href":"/v2/datacenters/wfad/UC1", - "id":"uc1", - "name":"UC1 - US West (Santa Clara)" - } - ] - } -''' - -__version__ = '${version}' - -import os -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk: -# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcAntiAffinityPolicy: - - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - self.policy_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), - exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), - exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - name=dict(required=True), - location=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - ) - return argument_spec - - # Module Behavior Goodness - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with 
either an exit_json or fail_json - """ - p = self.module.params - - self._set_clc_credentials_from_env() - self.policy_dict = self._get_policies_for_datacenter(p) - - if p['state'] == "absent": - changed, policy = self._ensure_policy_is_absent(p) - else: - changed, policy = self._ensure_policy_is_present(p) - - if hasattr(policy, 'data'): - policy = policy.data - elif hasattr(policy, '__dict__'): - policy = policy.__dict__ - - self.module.exit_json(changed=changed, policy=policy) - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _get_policies_for_datacenter(self, p): - """ - Get the Policies for a datacenter by calling the CLC API. - :param p: datacenter to get policies from - :return: policies in the datacenter - """ - response = {} - - policies = self.clc.v2.AntiAffinity.GetAll(location=p['location']) - - for policy in policies: - response[policy.name] = policy - return response - - def _create_policy(self, p): - """ - Create an Anti Affinity Policy using the CLC API. - :param p: datacenter to create policy in - :return: response dictionary from the CLC API. 
- """ - try: - return self.clc.v2.AntiAffinity.Create( - name=p['name'], - location=p['location']) - except CLCException as ex: - self.module.fail_json(msg='Failed to create anti affinity policy : {0}. {1}'.format( - p['name'], ex.response_text - )) - - def _delete_policy(self, p): - """ - Delete an Anti Affinity Policy using the CLC API. - :param p: datacenter to delete a policy from - :return: none - """ - try: - policy = self.policy_dict[p['name']] - policy.Delete() - except CLCException as ex: - self.module.fail_json(msg='Failed to delete anti affinity policy : {0}. {1}'.format( - p['name'], ex.response_text - )) - - def _policy_exists(self, policy_name): - """ - Check to see if an Anti Affinity Policy exists - :param policy_name: name of the policy - :return: boolean of if the policy exists - """ - if policy_name in self.policy_dict: - return self.policy_dict.get(policy_name) - - return False - - def _ensure_policy_is_absent(self, p): - """ - Makes sure that a policy is absent - :param p: dictionary of policy name - :return: tuple of if a deletion occurred and the name of the policy that was deleted - """ - changed = False - if self._policy_exists(policy_name=p['name']): - changed = True - if not self.module.check_mode: - self._delete_policy(p) - return changed, None - - def _ensure_policy_is_present(self, p): - """ - Ensures that a policy is present - :param p: dictionary of a policy name - :return: tuple of if an addition occurred and the name of the policy that was added - """ - changed = False - policy = self._policy_exists(policy_name=p['name']) - if not policy: - changed = True - policy = None - if not self.module.check_mode: - policy = self._create_policy(p) - return changed, policy - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - 
clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. - :return: none - """ - module = AnsibleModule( - argument_spec=ClcAntiAffinityPolicy._define_module_argument_spec(), - supports_check_mode=True) - clc_aa_policy = ClcAntiAffinityPolicy(module) - clc_aa_policy.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/clc_alert_policy.py b/plugins/modules/clc_alert_policy.py deleted file mode 100644 index 1d733013d2..0000000000 --- a/plugins/modules/clc_alert_policy.py +++ /dev/null @@ -1,529 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_alert_policy -short_description: Create or Delete Alert Policies at CenturyLink Cloud -description: - - An Ansible module to Create or Delete Alert Policies at CenturyLink Cloud. -options: - alias: - description: - - The alias of your CLC Account - type: str - required: true - name: - description: - - The name of the alert policy. This is mutually exclusive with id - type: str - id: - description: - - The alert policy id. This is mutually exclusive with name - type: str - alert_recipients: - description: - - A list of recipient email ids to notify the alert. - This is required for state 'present' - type: list - elements: str - metric: - description: - - The metric on which to measure the condition that will trigger the alert. - This is required for state 'present' - type: str - choices: ['cpu','memory','disk'] - duration: - description: - - The length of time in minutes that the condition must exceed the threshold. 
- This is required for state 'present' - type: str - threshold: - description: - - The threshold that will trigger the alert when the metric equals or exceeds it. - This is required for state 'present' - This number represents a percentage and must be a value between 5.0 - 95.0 that is a multiple of 5.0 - type: int - state: - description: - - Whether to create or delete the policy. - type: str - default: present - choices: ['present','absent'] -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
-''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - ---- -- name: Create Alert Policy Example - hosts: localhost - gather_facts: false - connection: local - tasks: - - name: Create an Alert Policy for disk above 80% for 5 minutes - community.general.clc_alert_policy: - alias: wfad - name: 'alert for disk > 80%' - alert_recipients: - - test1@centurylink.com - - test2@centurylink.com - metric: 'disk' - duration: '00:05:00' - threshold: 80 - state: present - register: policy - - - name: Debug - ansible.builtin.debug: var=policy - -- name: Delete Alert Policy Example - hosts: localhost - gather_facts: false - connection: local - tasks: - - name: Delete an Alert Policy - community.general.clc_alert_policy: - alias: wfad - name: 'alert for disk > 80%' - state: absent - register: policy - - - name: Debug - ansible.builtin.debug: var=policy -''' - -RETURN = ''' -policy: - description: The alert policy information - returned: success - type: dict - sample: - { - "actions": [ - { - "action": "email", - "settings": { - "recipients": [ - "user1@domain.com", - "user1@domain.com" - ] - } - } - ], - "id": "ba54ac54a60d4a4f1ed6d48c1ce240a7", - "links": [ - { - "href": "/v2/alertPolicies/alias/ba54ac54a60d4a4fb1d6d48c1ce240a7", - "rel": "self", - "verbs": [ - "GET", - "DELETE", - "PUT" - ] - } - ], - "name": "test_alert", - "triggers": [ - { - "duration": "00:05:00", - "metric": "disk", - "threshold": 80.0 - } - ] - } -''' - -__version__ = '${version}' - -import json -import os -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. 
-# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import APIFailedResponse -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcAlertPolicy: - - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - self.policy_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - name=dict(), - id=dict(), - alias=dict(required=True), - alert_recipients=dict(type='list', elements='str'), - metric=dict( - choices=[ - 'cpu', - 'memory', - 'disk']), - duration=dict(type='str'), - threshold=dict(type='int'), - state=dict(default='present', choices=['present', 'absent']) - ) - mutually_exclusive = [ - ['name', 'id'] - ] - return {'argument_spec': argument_spec, - 'mutually_exclusive': mutually_exclusive} - - # Module Behavior Goodness - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - p = self.module.params - - self._set_clc_credentials_from_env() - self.policy_dict = self._get_alert_policies(p['alias']) - - if p['state'] == 'present': - changed, policy = self._ensure_alert_policy_is_present() - else: - changed, policy = self._ensure_alert_policy_is_absent() - - 
self.module.exit_json(changed=changed, policy=policy) - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _ensure_alert_policy_is_present(self): - """ - Ensures that the alert policy is present - :return: (changed, policy) - changed: A flag representing if anything is modified - policy: the created/updated alert policy - """ - changed = False - p = self.module.params - policy_name = p.get('name') - - if not policy_name: - self.module.fail_json(msg='Policy name is a required') - policy = self._alert_policy_exists(policy_name) - if not policy: - changed = True - policy = None - if not self.module.check_mode: - policy = self._create_alert_policy() - else: - changed_u, policy = self._ensure_alert_policy_is_updated(policy) - if changed_u: - changed = True - return changed, policy - - def _ensure_alert_policy_is_absent(self): - """ - Ensures that the alert policy is absent - :return: (changed, None) - changed: A flag representing if anything is modified - """ - changed = False - p = self.module.params - alert_policy_id = p.get('id') - alert_policy_name = p.get('name') - alias = p.get('alias') - if not alert_policy_id and not alert_policy_name: - 
self.module.fail_json( - msg='Either alert policy id or policy name is required') - if not alert_policy_id and alert_policy_name: - alert_policy_id = self._get_alert_policy_id( - self.module, - alert_policy_name) - if alert_policy_id and alert_policy_id in self.policy_dict: - changed = True - if not self.module.check_mode: - self._delete_alert_policy(alias, alert_policy_id) - return changed, None - - def _ensure_alert_policy_is_updated(self, alert_policy): - """ - Ensures the alert policy is updated if anything is changed in the alert policy configuration - :param alert_policy: the target alert policy - :return: (changed, policy) - changed: A flag representing if anything is modified - policy: the updated the alert policy - """ - changed = False - p = self.module.params - alert_policy_id = alert_policy.get('id') - email_list = p.get('alert_recipients') - metric = p.get('metric') - duration = p.get('duration') - threshold = p.get('threshold') - policy = alert_policy - if (metric and metric != str(alert_policy.get('triggers')[0].get('metric'))) or \ - (duration and duration != str(alert_policy.get('triggers')[0].get('duration'))) or \ - (threshold and float(threshold) != float(alert_policy.get('triggers')[0].get('threshold'))): - changed = True - elif email_list: - t_email_list = list( - alert_policy.get('actions')[0].get('settings').get('recipients')) - if set(email_list) != set(t_email_list): - changed = True - if changed and not self.module.check_mode: - policy = self._update_alert_policy(alert_policy_id) - return changed, policy - - def _get_alert_policies(self, alias): - """ - Get the alert policies for account alias by calling the CLC API. 
- :param alias: the account alias - :return: the alert policies for the account alias - """ - response = {} - - policies = self.clc.v2.API.Call('GET', - '/v2/alertPolicies/%s' - % alias) - - for policy in policies.get('items'): - response[policy.get('id')] = policy - return response - - def _create_alert_policy(self): - """ - Create an alert Policy using the CLC API. - :return: response dictionary from the CLC API. - """ - p = self.module.params - alias = p['alias'] - email_list = p['alert_recipients'] - metric = p['metric'] - duration = p['duration'] - threshold = p['threshold'] - policy_name = p['name'] - arguments = json.dumps( - { - 'name': policy_name, - 'actions': [{ - 'action': 'email', - 'settings': { - 'recipients': email_list - } - }], - 'triggers': [{ - 'metric': metric, - 'duration': duration, - 'threshold': threshold - }] - } - ) - try: - result = self.clc.v2.API.Call( - 'POST', - '/v2/alertPolicies/%s' % alias, - arguments) - except APIFailedResponse as e: - return self.module.fail_json( - msg='Unable to create alert policy "{0}". {1}'.format( - policy_name, str(e.response_text))) - return result - - def _update_alert_policy(self, alert_policy_id): - """ - Update alert policy using the CLC API. - :param alert_policy_id: The clc alert policy id - :return: response dictionary from the CLC API. - """ - p = self.module.params - alias = p['alias'] - email_list = p['alert_recipients'] - metric = p['metric'] - duration = p['duration'] - threshold = p['threshold'] - policy_name = p['name'] - arguments = json.dumps( - { - 'name': policy_name, - 'actions': [{ - 'action': 'email', - 'settings': { - 'recipients': email_list - } - }], - 'triggers': [{ - 'metric': metric, - 'duration': duration, - 'threshold': threshold - }] - } - ) - try: - result = self.clc.v2.API.Call( - 'PUT', '/v2/alertPolicies/%s/%s' % - (alias, alert_policy_id), arguments) - except APIFailedResponse as e: - return self.module.fail_json( - msg='Unable to update alert policy "{0}". 
{1}'.format( - policy_name, str(e.response_text))) - return result - - def _delete_alert_policy(self, alias, policy_id): - """ - Delete an alert policy using the CLC API. - :param alias : the account alias - :param policy_id: the alert policy id - :return: response dictionary from the CLC API. - """ - try: - result = self.clc.v2.API.Call( - 'DELETE', '/v2/alertPolicies/%s/%s' % - (alias, policy_id), None) - except APIFailedResponse as e: - return self.module.fail_json( - msg='Unable to delete alert policy id "{0}". {1}'.format( - policy_id, str(e.response_text))) - return result - - def _alert_policy_exists(self, policy_name): - """ - Check to see if an alert policy exists - :param policy_name: name of the alert policy - :return: boolean of if the policy exists - """ - result = False - for policy_id in self.policy_dict: - if self.policy_dict.get(policy_id).get('name') == policy_name: - result = self.policy_dict.get(policy_id) - return result - - def _get_alert_policy_id(self, module, alert_policy_name): - """ - retrieves the alert policy id of the account based on the name of the policy - :param module: the AnsibleModule object - :param alert_policy_name: the alert policy name - :return: alert_policy_id: The alert policy id - """ - alert_policy_id = None - for policy_id in self.policy_dict: - if self.policy_dict.get(policy_id).get('name') == alert_policy_name: - if not alert_policy_id: - alert_policy_id = policy_id - else: - return module.fail_json( - msg='multiple alert policies were found with policy name : %s' % alert_policy_name) - return alert_policy_id - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. 
- :return: none - """ - argument_dict = ClcAlertPolicy._define_module_argument_spec() - module = AnsibleModule(supports_check_mode=True, **argument_dict) - clc_alert_policy = ClcAlertPolicy(module) - clc_alert_policy.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/clc_blueprint_package.py b/plugins/modules/clc_blueprint_package.py deleted file mode 100644 index cb23df852b..0000000000 --- a/plugins/modules/clc_blueprint_package.py +++ /dev/null @@ -1,302 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_blueprint_package -short_description: Deploys a blue print package on a set of servers in CenturyLink Cloud -description: - - An Ansible module to deploy blue print package on a set of servers in CenturyLink Cloud. -options: - server_ids: - description: - - A list of server Ids to deploy the blue print package. - type: list - required: true - elements: str - package_id: - description: - - The package id of the blue print. - type: str - required: true - package_params: - description: - - The dictionary of arguments required to deploy the blue print. - type: dict - default: {} - required: false - state: - description: - - Whether to install or uninstall the package. Currently it supports only "present" for install action. - type: str - required: false - default: present - choices: ['present'] - wait: - description: - - Whether to wait for the tasks to finish before returning. 
- type: str - default: 'True' - required: false -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. -''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: Deploy package - community.general.clc_blueprint_package: - server_ids: - - UC1TEST-SERVER1 - - UC1TEST-SERVER2 - package_id: 77abb844-579d-478d-3955-c69ab4a7ba1a - package_params: {} -''' - -RETURN = ''' -server_ids: - description: The list of server ids that are changed - returned: success - type: list - sample: - [ - "UC1TEST-SERVER1", - "UC1TEST-SERVER2" - ] -''' - -__version__ = '${version}' - -import os -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. 
-# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcBlueprintPackage: - - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - p = self.module.params - changed = False - changed_server_ids = [] - self._set_clc_credentials_from_env() - server_ids = p['server_ids'] - package_id = p['package_id'] - package_params = p['package_params'] - state = p['state'] - if state == 'present': - changed, changed_server_ids, request_list = self.ensure_package_installed( - server_ids, package_id, package_params) - self._wait_for_requests_to_complete(request_list) - self.module.exit_json(changed=changed, server_ids=changed_server_ids) - - @staticmethod - def define_argument_spec(): - """ - This function defines the dictionary object required for - package module - :return: the package dictionary object - """ - argument_spec = dict( - server_ids=dict(type='list', elements='str', required=True), - package_id=dict(required=True), - package_params=dict(type='dict', default={}), - wait=dict(default=True), # @FIXME should be bool? 
- state=dict(default='present', choices=['present']) - ) - return argument_spec - - def ensure_package_installed(self, server_ids, package_id, package_params): - """ - Ensure the package is installed in the given list of servers - :param server_ids: the server list where the package needs to be installed - :param package_id: the blueprint package id - :param package_params: the package arguments - :return: (changed, server_ids, request_list) - changed: A flag indicating if a change was made - server_ids: The list of servers modified - request_list: The list of request objects from clc-sdk - """ - changed = False - request_list = [] - servers = self._get_servers_from_clc( - server_ids, - 'Failed to get servers from CLC') - for server in servers: - if not self.module.check_mode: - request = self.clc_install_package( - server, - package_id, - package_params) - request_list.append(request) - changed = True - return changed, server_ids, request_list - - def clc_install_package(self, server, package_id, package_params): - """ - Install the package to a given clc server - :param server: The server object where the package needs to be installed - :param package_id: The blue print package id - :param package_params: the required argument dict for the package installation - :return: The result object from the CLC API call - """ - result = None - try: - result = server.ExecutePackage( - package_id=package_id, - parameters=package_params) - except CLCException as ex: - self.module.fail_json(msg='Failed to install package : {0} to server {1}. 
{2}'.format( - package_id, server.id, ex.message - )) - return result - - def _wait_for_requests_to_complete(self, request_lst): - """ - Waits until the CLC requests are complete if the wait argument is True - :param request_lst: The list of CLC request objects - :return: none - """ - if not self.module.params['wait']: - return - for request in request_lst: - request.WaitUntilComplete() - for request_details in request.requests: - if request_details.Status() != 'succeeded': - self.module.fail_json( - msg='Unable to process package install request') - - def _get_servers_from_clc(self, server_list, message): - """ - Internal function to fetch list of CLC server objects from a list of server ids - :param server_list: the list of server ids - :param message: the error message to raise if there is any error - :return the list of CLC server objects - """ - try: - return self.clc.v2.Servers(server_list).servers - except CLCException as ex: - self.module.fail_json(msg=message + ': %s' % ex) - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = 
"ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - Main function - :return: None - """ - module = AnsibleModule( - argument_spec=ClcBlueprintPackage.define_argument_spec(), - supports_check_mode=True - ) - clc_blueprint_package = ClcBlueprintPackage(module) - clc_blueprint_package.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/clc_firewall_policy.py b/plugins/modules/clc_firewall_policy.py deleted file mode 100644 index cc77238db9..0000000000 --- a/plugins/modules/clc_firewall_policy.py +++ /dev/null @@ -1,589 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_firewall_policy -short_description: Create/delete/update firewall policies -description: - - Create or delete or update firewall policies on Centurylink Cloud -options: - location: - description: - - Target datacenter for the firewall policy - type: str - required: true - state: - description: - - Whether to create or delete the firewall policy - type: str - default: present - choices: ['present', 'absent'] - source: - description: - - The list of source addresses for traffic on the originating firewall. - This is required when state is 'present' - type: list - elements: str - destination: - description: - - The list of destination addresses for traffic on the terminating firewall. - This is required when state is 'present' - type: list - elements: str - ports: - description: - - The list of ports associated with the policy. - TCP and UDP can take in single ports or port ranges. 
- - "Example: C(['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456'])." - type: list - elements: str - firewall_policy_id: - description: - - Id of the firewall policy. This is required to update or delete an existing firewall policy - type: str - source_account_alias: - description: - - CLC alias for the source account - type: str - required: true - destination_account_alias: - description: - - CLC alias for the destination account - type: str - wait: - description: - - Whether to wait for the provisioning tasks to finish before returning. - type: str - default: 'True' - enabled: - description: - - Whether the firewall policy is enabled or disabled - type: str - choices: ['True', 'False'] - default: 'True' -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
-''' - -EXAMPLES = ''' ---- -- name: Create Firewall Policy - hosts: localhost - gather_facts: false - connection: local - tasks: - - name: Create / Verify an Firewall Policy at CenturyLink Cloud - clc_firewall: - source_account_alias: WFAD - location: VA1 - state: present - source: 10.128.216.0/24 - destination: 10.128.216.0/24 - ports: Any - destination_account_alias: WFAD - -- name: Delete Firewall Policy - hosts: localhost - gather_facts: false - connection: local - tasks: - - name: Delete an Firewall Policy at CenturyLink Cloud - clc_firewall: - source_account_alias: WFAD - location: VA1 - state: absent - firewall_policy_id: c62105233d7a4231bd2e91b9c791e43e1 -''' - -RETURN = ''' -firewall_policy_id: - description: The fire wall policy id - returned: success - type: str - sample: fc36f1bfd47242e488a9c44346438c05 -firewall_policy: - description: The fire wall policy information - returned: success - type: dict - sample: - { - "destination":[ - "10.1.1.0/24", - "10.2.2.0/24" - ], - "destinationAccount":"wfad", - "enabled":true, - "id":"fc36f1bfd47242e488a9c44346438c05", - "links":[ - { - "href":"http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05", - "rel":"self", - "verbs":[ - "GET", - "PUT", - "DELETE" - ] - } - ], - "ports":[ - "any" - ], - "source":[ - "10.1.1.0/24", - "10.2.2.0/24" - ], - "status":"active" - } -''' - -__version__ = '${version}' - -import os -import traceback -from ansible.module_utils.six.moves.urllib.parse import urlparse -from time import sleep - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import APIFailedResponse -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - 
CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcFirewallPolicy: - - clc = None - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - self.firewall_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - location=dict(required=True), - source_account_alias=dict(required=True), - destination_account_alias=dict(), - firewall_policy_id=dict(), - ports=dict(type='list', elements='str'), - source=dict(type='list', elements='str'), - destination=dict(type='list', elements='str'), - wait=dict(default=True), # @FIXME type=bool - state=dict(default='present', choices=['present', 'absent']), - enabled=dict(default=True, choices=[True, False]) - ) - return argument_spec - - def process_request(self): - """ - Execute the main code path, and handle the request - :return: none - """ - changed = False - firewall_policy = None - location = self.module.params.get('location') - source_account_alias = self.module.params.get('source_account_alias') - destination_account_alias = self.module.params.get( - 'destination_account_alias') - firewall_policy_id = self.module.params.get('firewall_policy_id') - ports = self.module.params.get('ports') - source = self.module.params.get('source') - destination = self.module.params.get('destination') - wait = self.module.params.get('wait') - state = self.module.params.get('state') - 
enabled = self.module.params.get('enabled') - - self.firewall_dict = { - 'location': location, - 'source_account_alias': source_account_alias, - 'destination_account_alias': destination_account_alias, - 'firewall_policy_id': firewall_policy_id, - 'ports': ports, - 'source': source, - 'destination': destination, - 'wait': wait, - 'state': state, - 'enabled': enabled} - - self._set_clc_credentials_from_env() - - if state == 'absent': - changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_absent( - source_account_alias, location, self.firewall_dict) - - elif state == 'present': - changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_present( - source_account_alias, location, self.firewall_dict) - - return self.module.exit_json( - changed=changed, - firewall_policy_id=firewall_policy_id, - firewall_policy=firewall_policy) - - @staticmethod - def _get_policy_id_from_response(response): - """ - Method to parse out the policy id from creation response - :param response: response from firewall creation API call - :return: policy_id: firewall policy id from creation call - """ - url = response.get('links')[0]['href'] - path = urlparse(url).path - path_list = os.path.split(path) - policy_id = path_list[-1] - return policy_id - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - 
api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _ensure_firewall_policy_is_present( - self, - source_account_alias, - location, - firewall_dict): - """ - Ensures that a given firewall policy is present - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_dict: dictionary of request parameters for firewall policy - :return: (changed, firewall_policy_id, firewall_policy) - changed: flag for if a change occurred - firewall_policy_id: the firewall policy id that was created/updated - firewall_policy: The firewall_policy object - """ - firewall_policy = None - firewall_policy_id = firewall_dict.get('firewall_policy_id') - - if firewall_policy_id is None: - if not self.module.check_mode: - response = self._create_firewall_policy( - source_account_alias, - location, - firewall_dict) - firewall_policy_id = self._get_policy_id_from_response( - response) - changed = True - else: - firewall_policy = self._get_firewall_policy( - source_account_alias, location, firewall_policy_id) - if not firewall_policy: - return self.module.fail_json( - msg='Unable to find the firewall policy id : {0}'.format( - firewall_policy_id)) - changed = self._compare_get_request_with_dict( - firewall_policy, - firewall_dict) - if not self.module.check_mode and changed: - self._update_firewall_policy( - source_account_alias, - location, - firewall_policy_id, - firewall_dict) - if changed and firewall_policy_id: - firewall_policy = self._wait_for_requests_to_complete( - source_account_alias, - location, - firewall_policy_id) - return changed, firewall_policy_id, firewall_policy - - def _ensure_firewall_policy_is_absent( - self, - source_account_alias, - location, - firewall_dict): - """ - Ensures that a given firewall policy is removed if present - :param 
source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_dict: firewall policy to delete - :return: (changed, firewall_policy_id, response) - changed: flag for if a change occurred - firewall_policy_id: the firewall policy id that was deleted - response: response from CLC API call - """ - changed = False - response = [] - firewall_policy_id = firewall_dict.get('firewall_policy_id') - result = self._get_firewall_policy( - source_account_alias, location, firewall_policy_id) - if result: - if not self.module.check_mode: - response = self._delete_firewall_policy( - source_account_alias, - location, - firewall_policy_id) - changed = True - return changed, firewall_policy_id, response - - def _create_firewall_policy( - self, - source_account_alias, - location, - firewall_dict): - """ - Creates the firewall policy for the given account alias - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_dict: dictionary of request parameters for firewall policy - :return: response from CLC API call - """ - payload = { - 'destinationAccount': firewall_dict.get('destination_account_alias'), - 'source': firewall_dict.get('source'), - 'destination': firewall_dict.get('destination'), - 'ports': firewall_dict.get('ports')} - try: - response = self.clc.v2.API.Call( - 'POST', '/v2-experimental/firewallPolicies/%s/%s' % - (source_account_alias, location), payload) - except APIFailedResponse as e: - return self.module.fail_json( - msg="Unable to create firewall policy. 
%s" % - str(e.response_text)) - return response - - def _delete_firewall_policy( - self, - source_account_alias, - location, - firewall_policy_id): - """ - Deletes a given firewall policy for an account alias in a datacenter - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_policy_id: firewall policy id to delete - :return: response: response from CLC API call - """ - try: - response = self.clc.v2.API.Call( - 'DELETE', '/v2-experimental/firewallPolicies/%s/%s/%s' % - (source_account_alias, location, firewall_policy_id)) - except APIFailedResponse as e: - return self.module.fail_json( - msg="Unable to delete the firewall policy id : {0}. {1}".format( - firewall_policy_id, str(e.response_text))) - return response - - def _update_firewall_policy( - self, - source_account_alias, - location, - firewall_policy_id, - firewall_dict): - """ - Updates a firewall policy for a given datacenter and account alias - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_policy_id: firewall policy id to update - :param firewall_dict: dictionary of request parameters for firewall policy - :return: response: response from CLC API call - """ - try: - response = self.clc.v2.API.Call( - 'PUT', - '/v2-experimental/firewallPolicies/%s/%s/%s' % - (source_account_alias, - location, - firewall_policy_id), - firewall_dict) - except APIFailedResponse as e: - return self.module.fail_json( - msg="Unable to update the firewall policy id : {0}. 
{1}".format( - firewall_policy_id, str(e.response_text))) - return response - - @staticmethod - def _compare_get_request_with_dict(response, firewall_dict): - """ - Helper method to compare the json response for getting the firewall policy with the request parameters - :param response: response from the get method - :param firewall_dict: dictionary of request parameters for firewall policy - :return: changed: Boolean that returns true if there are differences between - the response parameters and the playbook parameters - """ - - changed = False - - response_dest_account_alias = response.get('destinationAccount') - response_enabled = response.get('enabled') - response_source = response.get('source') - response_dest = response.get('destination') - response_ports = response.get('ports') - request_dest_account_alias = firewall_dict.get( - 'destination_account_alias') - request_enabled = firewall_dict.get('enabled') - if request_enabled is None: - request_enabled = True - request_source = firewall_dict.get('source') - request_dest = firewall_dict.get('destination') - request_ports = firewall_dict.get('ports') - - if ( - response_dest_account_alias and str(response_dest_account_alias) != str(request_dest_account_alias)) or ( - response_enabled != request_enabled) or ( - response_source and response_source != request_source) or ( - response_dest and response_dest != request_dest) or ( - response_ports and response_ports != request_ports): - changed = True - return changed - - def _get_firewall_policy( - self, - source_account_alias, - location, - firewall_policy_id): - """ - Get back details for a particular firewall policy - :param source_account_alias: the source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_policy_id: id of the firewall policy to get - :return: response - The response from CLC API call - """ - response = None - try: - response = self.clc.v2.API.Call( - 'GET', 
'/v2-experimental/firewallPolicies/%s/%s/%s' % - (source_account_alias, location, firewall_policy_id)) - except APIFailedResponse as e: - if e.response_status_code != 404: - self.module.fail_json( - msg="Unable to fetch the firewall policy with id : {0}. {1}".format( - firewall_policy_id, str(e.response_text))) - return response - - def _wait_for_requests_to_complete( - self, - source_account_alias, - location, - firewall_policy_id, - wait_limit=50): - """ - Waits until the CLC requests are complete if the wait argument is True - :param source_account_alias: The source account alias for the firewall policy - :param location: datacenter of the firewall policy - :param firewall_policy_id: The firewall policy id - :param wait_limit: The number of times to check the status for completion - :return: the firewall_policy object - """ - wait = self.module.params.get('wait') - count = 0 - firewall_policy = None - while wait: - count += 1 - firewall_policy = self._get_firewall_policy( - source_account_alias, location, firewall_policy_id) - status = firewall_policy.get('status') - if status == 'active' or count > wait_limit: - wait = False - else: - # wait for 2 seconds - sleep(2) - return firewall_policy - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. 
- :return: none - """ - module = AnsibleModule( - argument_spec=ClcFirewallPolicy._define_module_argument_spec(), - supports_check_mode=True) - - clc_firewall = ClcFirewallPolicy(module) - clc_firewall.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/clc_group.py b/plugins/modules/clc_group.py deleted file mode 100644 index 21e6d93d28..0000000000 --- a/plugins/modules/clc_group.py +++ /dev/null @@ -1,515 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_group -short_description: Create/delete Server Groups at Centurylink Cloud -description: - - Create or delete Server Groups at Centurylink Centurylink Cloud -options: - name: - description: - - The name of the Server Group - type: str - required: true - description: - description: - - A description of the Server Group - type: str - required: false - parent: - description: - - The parent group of the server group. If parent is not provided, it creates the group at top level. - type: str - required: false - location: - description: - - Datacenter to create the group in. If location is not provided, the group gets created in the default datacenter - associated with the account - type: str - required: false - state: - description: - - Whether to create or delete the group - type: str - default: present - choices: ['present', 'absent'] - wait: - description: - - Whether to wait for the tasks to finish before returning. 
- type: bool - default: true - required: false -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. -''' - -EXAMPLES = ''' - -# Create a Server Group - ---- -- name: Create Server Group - hosts: localhost - gather_facts: false - connection: local - tasks: - - name: Create / Verify a Server Group at CenturyLink Cloud - community.general.clc_group: - name: My Cool Server Group - parent: Default Group - state: present - register: clc - - - name: Debug - ansible.builtin.debug: - var: clc - -# Delete a Server Group -- name: Delete Server Group - hosts: localhost - gather_facts: false - connection: local - tasks: - - name: Delete / Verify Absent a Server Group at CenturyLink Cloud - community.general.clc_group: - name: My Cool Server Group - parent: Default Group - state: absent - register: clc - - - name: Debug - ansible.builtin.debug: - var: clc -''' - -RETURN = ''' -group: - description: The group information - returned: success - type: dict - sample: - { - "changeInfo":{ - "createdBy":"service.wfad", - "createdDate":"2015-07-29T18:52:47Z", - "modifiedBy":"service.wfad", - "modifiedDate":"2015-07-29T18:52:47Z" - }, - "customFields":[ - - ], - "description":"test group", - 
"groups":[ - - ], - "id":"bb5f12a3c6044ae4ad0a03e73ae12cd1", - "links":[ - { - "href":"/v2/groups/wfad", - "rel":"createGroup", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/servers/wfad", - "rel":"createServer", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1", - "rel":"self", - "verbs":[ - "GET", - "PATCH", - "DELETE" - ] - }, - { - "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0", - "id":"086ac1dfe0b6411989e8d1b77c4065f0", - "rel":"parentGroup" - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/defaults", - "rel":"defaults", - "verbs":[ - "GET", - "POST" - ] - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/billing", - "rel":"billing" - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/archive", - "rel":"archiveGroupAction" - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/statistics", - "rel":"statistics" - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/upcomingScheduledActivities", - "rel":"upcomingScheduledActivities" - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/horizontalAutoscalePolicy", - "rel":"horizontalAutoscalePolicyMapping", - "verbs":[ - "GET", - "PUT", - "DELETE" - ] - }, - { - "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/scheduledActivities", - "rel":"scheduledActivities", - "verbs":[ - "GET", - "POST" - ] - } - ], - "locationId":"UC1", - "name":"test group", - "status":"active", - "type":"default" - } -''' - -__version__ = '${version}' - -import os -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. 
-# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcGroup(object): - - clc = None - root_group = None - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - self.group_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Execute the main code path, and handle the request - :return: none - """ - location = self.module.params.get('location') - group_name = self.module.params.get('name') - parent_name = self.module.params.get('parent') - group_description = self.module.params.get('description') - state = self.module.params.get('state') - - self._set_clc_credentials_from_env() - self.group_dict = self._get_group_tree_for_datacenter( - datacenter=location) - - if state == "absent": - changed, group, requests = self._ensure_group_is_absent( - group_name=group_name, parent_name=parent_name) - if requests: - self._wait_for_requests_to_complete(requests) - else: - changed, group = self._ensure_group_is_present( - group_name=group_name, parent_name=parent_name, group_description=group_description) - try: - group = group.data - except AttributeError: - group = group_name - self.module.exit_json(changed=changed, group=group) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - 
:return: argument spec dictionary - """ - argument_spec = dict( - name=dict(required=True), - description=dict(), - parent=dict(), - location=dict(), - state=dict(default='present', choices=['present', 'absent']), - wait=dict(type='bool', default=True)) - - return argument_spec - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _ensure_group_is_absent(self, group_name, parent_name): - """ - Ensure that group_name is absent by deleting it if necessary - :param group_name: string - the name of the clc server group to delete - :param parent_name: string - the name of the parent group for group_name - :return: changed, group - """ - changed = False - group = [] - results = [] - - if self._group_exists(group_name=group_name, parent_name=parent_name): - if not self.module.check_mode: - group.append(group_name) - result = self._delete_group(group_name) - results.append(result) - changed = True - return changed, group, results - - def _delete_group(self, group_name): - """ - Delete the provided server group - :param group_name: string - the server group to delete - :return: none - """ - response = None - group, parent = 
self.group_dict.get(group_name) - try: - response = group.Delete() - except CLCException as ex: - self.module.fail_json(msg='Failed to delete group :{0}. {1}'.format( - group_name, ex.response_text - )) - return response - - def _ensure_group_is_present( - self, - group_name, - parent_name, - group_description): - """ - Checks to see if a server group exists, creates it if it doesn't. - :param group_name: the name of the group to validate/create - :param parent_name: the name of the parent group for group_name - :param group_description: a short description of the server group (used when creating) - :return: (changed, group) - - changed: Boolean- whether a change was made, - group: A clc group object for the group - """ - if not self.root_group: - raise AssertionError("Implementation Error: Root Group not set") - parent = parent_name if parent_name is not None else self.root_group.name - description = group_description - changed = False - group = group_name - - parent_exists = self._group_exists(group_name=parent, parent_name=None) - child_exists = self._group_exists( - group_name=group_name, - parent_name=parent) - - if parent_exists and child_exists: - group, parent = self.group_dict[group_name] - changed = False - elif parent_exists and not child_exists: - if not self.module.check_mode: - group = self._create_group( - group=group, - parent=parent, - description=description) - changed = True - else: - self.module.fail_json( - msg="parent group: " + - parent + - " does not exist") - - return changed, group - - def _create_group(self, group, parent, description): - """ - Create the provided server group - :param group: clc_sdk.Group - the group to create - :param parent: clc_sdk.Parent - the parent group for {group} - :param description: string - a text description of the group - :return: clc_sdk.Group - the created group - """ - response = None - (parent, grandparent) = self.group_dict[parent] - try: - response = parent.Create(name=group, description=description) 
- except CLCException as ex: - self.module.fail_json(msg='Failed to create group :{0}. {1}'.format( - group, ex.response_text)) - return response - - def _group_exists(self, group_name, parent_name): - """ - Check to see if a group exists - :param group_name: string - the group to check - :param parent_name: string - the parent of group_name - :return: boolean - whether the group exists - """ - result = False - if group_name in self.group_dict: - (group, parent) = self.group_dict[group_name] - if parent_name is None or parent_name == parent.name: - result = True - return result - - def _get_group_tree_for_datacenter(self, datacenter=None): - """ - Walk the tree of groups for a datacenter - :param datacenter: string - the datacenter to walk (ex: 'UC1') - :return: a dictionary of groups and parents - """ - self.root_group = self.clc.v2.Datacenter( - location=datacenter).RootGroup() - return self._walk_groups_recursive( - parent_group=None, - child_group=self.root_group) - - def _walk_groups_recursive(self, parent_group, child_group): - """ - Walk a parent-child tree of groups, starting with the provided child group - :param parent_group: clc_sdk.Group - the parent group to start the walk - :param child_group: clc_sdk.Group - the child group to start the walk - :return: a dictionary of groups and parents - """ - result = {str(child_group): (child_group, parent_group)} - groups = child_group.Subgroups().groups - if len(groups) > 0: - for group in groups: - if group.type != 'default': - continue - - result.update(self._walk_groups_recursive(child_group, group)) - return result - - def _wait_for_requests_to_complete(self, requests_lst): - """ - Waits until the CLC requests are complete if the wait argument is True - :param requests_lst: The list of CLC request objects - :return: none - """ - if not self.module.params['wait']: - return - for request in requests_lst: - request.WaitUntilComplete() - for request_details in request.requests: - if request_details.Status() != 
'succeeded': - self.module.fail_json( - msg='Unable to process group request') - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. - :return: none - """ - module = AnsibleModule( - argument_spec=ClcGroup._define_module_argument_spec(), - supports_check_mode=True) - - clc_group = ClcGroup(module) - clc_group.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/clc_loadbalancer.py b/plugins/modules/clc_loadbalancer.py deleted file mode 100644 index ab6d866fb6..0000000000 --- a/plugins/modules/clc_loadbalancer.py +++ /dev/null @@ -1,938 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_loadbalancer -short_description: Create, Delete shared loadbalancers in CenturyLink Cloud -description: - - An Ansible module to Create, Delete shared loadbalancers in CenturyLink Cloud. 
-options: - name: - description: - - The name of the loadbalancer - type: str - required: true - description: - description: - - A description for the loadbalancer - type: str - alias: - description: - - The alias of your CLC Account - type: str - required: true - location: - description: - - The location of the datacenter where the load balancer resides in - type: str - required: true - method: - description: - -The balancing method for the load balancer pool - type: str - choices: ['leastConnection', 'roundRobin'] - persistence: - description: - - The persistence method for the load balancer - type: str - choices: ['standard', 'sticky'] - port: - description: - - Port to configure on the public-facing side of the load balancer pool - type: str - choices: ['80', '443'] - nodes: - description: - - A list of nodes that needs to be added to the load balancer pool - type: list - default: [] - elements: dict - status: - description: - - The status of the loadbalancer - type: str - default: enabled - choices: ['enabled', 'disabled'] - state: - description: - - Whether to create or delete the load balancer pool - type: str - default: present - choices: ['present', 'absent', 'port_absent', 'nodes_present', 'nodes_absent'] -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. 
The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. -''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples -- name: Create Loadbalancer - hosts: localhost - connection: local - tasks: - - name: Actually Create things - community.general.clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - ipAddress: 10.11.22.123 - privatePort: 80 - state: present - -- name: Add node to an existing loadbalancer pool - hosts: localhost - connection: local - tasks: - - name: Actually Create things - community.general.clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - ipAddress: 10.11.22.234 - privatePort: 80 - state: nodes_present - -- name: Remove node from an existing loadbalancer pool - hosts: localhost - connection: local - tasks: - - name: Actually Create things - community.general.clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - ipAddress: 10.11.22.234 - privatePort: 80 - state: nodes_absent - -- name: Delete LoadbalancerPool - hosts: localhost - connection: local - tasks: - - name: Actually Delete things - community.general.clc_loadbalancer: - name: test - description: test - alias: TEST - location: WA1 - port: 443 - nodes: - - ipAddress: 10.11.22.123 - privatePort: 80 - state: port_absent - -- name: Delete Loadbalancer - hosts: localhost - connection: local - tasks: - - name: Actually Delete things - community.general.clc_loadbalancer: - name: test - description: test - 
alias: TEST - location: WA1 - port: 443 - nodes: - - ipAddress: 10.11.22.123 - privatePort: 80 - state: absent -''' - -RETURN = ''' -loadbalancer: - description: The load balancer result object from CLC - returned: success - type: dict - sample: - { - "description":"test-lb", - "id":"ab5b18cb81e94ab9925b61d1ca043fb5", - "ipAddress":"66.150.174.197", - "links":[ - { - "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5", - "rel":"self", - "verbs":[ - "GET", - "PUT", - "DELETE" - ] - }, - { - "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5/pools", - "rel":"pools", - "verbs":[ - "GET", - "POST" - ] - } - ], - "name":"test-lb", - "pools":[ - - ], - "status":"enabled" - } -''' - -__version__ = '${version}' - -import json -import os -import traceback -from time import sleep - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. 
-# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import APIFailedResponse -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcLoadBalancer: - - clc = None - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - self.lb_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Execute the main code path, and handle the request - :return: none - """ - changed = False - result_lb = None - loadbalancer_name = self.module.params.get('name') - loadbalancer_alias = self.module.params.get('alias') - loadbalancer_location = self.module.params.get('location') - loadbalancer_description = self.module.params.get('description') - loadbalancer_port = self.module.params.get('port') - loadbalancer_method = self.module.params.get('method') - loadbalancer_persistence = self.module.params.get('persistence') - loadbalancer_nodes = self.module.params.get('nodes') - loadbalancer_status = self.module.params.get('status') - state = self.module.params.get('state') - - if loadbalancer_description is None: - loadbalancer_description = loadbalancer_name - - self._set_clc_credentials_from_env() - - self.lb_dict = self._get_loadbalancer_list( - alias=loadbalancer_alias, - location=loadbalancer_location) - - if state == 'present': - changed, result_lb, lb_id = self.ensure_loadbalancer_present( - name=loadbalancer_name, - 
alias=loadbalancer_alias, - location=loadbalancer_location, - description=loadbalancer_description, - status=loadbalancer_status) - if loadbalancer_port: - changed, result_pool, pool_id = self.ensure_loadbalancerpool_present( - lb_id=lb_id, - alias=loadbalancer_alias, - location=loadbalancer_location, - method=loadbalancer_method, - persistence=loadbalancer_persistence, - port=loadbalancer_port) - - if loadbalancer_nodes: - changed, result_nodes = self.ensure_lbpool_nodes_set( - alias=loadbalancer_alias, - location=loadbalancer_location, - name=loadbalancer_name, - port=loadbalancer_port, - nodes=loadbalancer_nodes) - elif state == 'absent': - changed, result_lb = self.ensure_loadbalancer_absent( - name=loadbalancer_name, - alias=loadbalancer_alias, - location=loadbalancer_location) - - elif state == 'port_absent': - changed, result_lb = self.ensure_loadbalancerpool_absent( - alias=loadbalancer_alias, - location=loadbalancer_location, - name=loadbalancer_name, - port=loadbalancer_port) - - elif state == 'nodes_present': - changed, result_lb = self.ensure_lbpool_nodes_present( - alias=loadbalancer_alias, - location=loadbalancer_location, - name=loadbalancer_name, - port=loadbalancer_port, - nodes=loadbalancer_nodes) - - elif state == 'nodes_absent': - changed, result_lb = self.ensure_lbpool_nodes_absent( - alias=loadbalancer_alias, - location=loadbalancer_location, - name=loadbalancer_name, - port=loadbalancer_port, - nodes=loadbalancer_nodes) - - self.module.exit_json(changed=changed, loadbalancer=result_lb) - - def ensure_loadbalancer_present( - self, name, alias, location, description, status): - """ - Checks to see if a load balancer exists and creates one if it does not. 
- :param name: Name of loadbalancer - :param alias: Alias of account - :param location: Datacenter - :param description: Description of loadbalancer - :param status: Enabled / Disabled - :return: (changed, result, lb_id) - changed: Boolean whether a change was made - result: The result object from the CLC load balancer request - lb_id: The load balancer id - """ - changed = False - result = name - lb_id = self._loadbalancer_exists(name=name) - if not lb_id: - if not self.module.check_mode: - result = self.create_loadbalancer(name=name, - alias=alias, - location=location, - description=description, - status=status) - lb_id = result.get('id') - changed = True - - return changed, result, lb_id - - def ensure_loadbalancerpool_present( - self, lb_id, alias, location, method, persistence, port): - """ - Checks to see if a load balancer pool exists and creates one if it does not. - :param lb_id: The loadbalancer id - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param method: the load balancing method - :param persistence: the load balancing persistence type - :param port: the port that the load balancer will listen on - :return: (changed, group, pool_id) - - changed: Boolean whether a change was made - result: The result from the CLC API call - pool_id: The string id of the load balancer pool - """ - changed = False - result = port - if not lb_id: - return changed, None, None - pool_id = self._loadbalancerpool_exists( - alias=alias, - location=location, - port=port, - lb_id=lb_id) - if not pool_id: - if not self.module.check_mode: - result = self.create_loadbalancerpool( - alias=alias, - location=location, - lb_id=lb_id, - method=method, - persistence=persistence, - port=port) - pool_id = result.get('id') - changed = True - - return changed, result, pool_id - - def ensure_loadbalancer_absent(self, name, alias, location): - """ - Checks to see if a load balancer exists and deletes it if it does - :param name: Name of the 
load balancer - :param alias: Alias of account - :param location: Datacenter - :return: (changed, result) - changed: Boolean whether a change was made - result: The result from the CLC API Call - """ - changed = False - result = name - lb_exists = self._loadbalancer_exists(name=name) - if lb_exists: - if not self.module.check_mode: - result = self.delete_loadbalancer(alias=alias, - location=location, - name=name) - changed = True - return changed, result - - def ensure_loadbalancerpool_absent(self, alias, location, name, port): - """ - Checks to see if a load balancer pool exists and deletes it if it does - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param name: the name of the load balancer - :param port: the port that the load balancer listens on - :return: (changed, result) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - result = None - lb_exists = self._loadbalancer_exists(name=name) - if lb_exists: - lb_id = self._get_loadbalancer_id(name=name) - pool_id = self._loadbalancerpool_exists( - alias=alias, - location=location, - port=port, - lb_id=lb_id) - if pool_id: - changed = True - if not self.module.check_mode: - result = self.delete_loadbalancerpool( - alias=alias, - location=location, - lb_id=lb_id, - pool_id=pool_id) - else: - result = "Pool doesn't exist" - else: - result = "LB Doesn't Exist" - return changed, result - - def ensure_lbpool_nodes_set(self, alias, location, name, port, nodes): - """ - Checks to see if the provided list of nodes exist for the pool - and set the nodes if any in the list those doesn't exist - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param name: the name of the load balancer - :param port: the port that the load balancer will listen on - :param nodes: The list of nodes to be updated to the pool - :return: (changed, result) - - changed: Boolean whether a 
change was made - result: The result from the CLC API call - """ - result = {} - changed = False - lb_exists = self._loadbalancer_exists(name=name) - if lb_exists: - lb_id = self._get_loadbalancer_id(name=name) - pool_id = self._loadbalancerpool_exists( - alias=alias, - location=location, - port=port, - lb_id=lb_id) - if pool_id: - nodes_exist = self._loadbalancerpool_nodes_exists(alias=alias, - location=location, - lb_id=lb_id, - pool_id=pool_id, - nodes_to_check=nodes) - if not nodes_exist: - changed = True - result = self.set_loadbalancernodes(alias=alias, - location=location, - lb_id=lb_id, - pool_id=pool_id, - nodes=nodes) - else: - result = "Pool doesn't exist" - else: - result = "Load balancer doesn't Exist" - return changed, result - - def ensure_lbpool_nodes_present(self, alias, location, name, port, nodes): - """ - Checks to see if the provided list of nodes exist for the pool and add the missing nodes to the pool - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param name: the name of the load balancer - :param port: the port that the load balancer will listen on - :param nodes: the list of nodes to be added - :return: (changed, result) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - lb_exists = self._loadbalancer_exists(name=name) - if lb_exists: - lb_id = self._get_loadbalancer_id(name=name) - pool_id = self._loadbalancerpool_exists( - alias=alias, - location=location, - port=port, - lb_id=lb_id) - if pool_id: - changed, result = self.add_lbpool_nodes(alias=alias, - location=location, - lb_id=lb_id, - pool_id=pool_id, - nodes_to_add=nodes) - else: - result = "Pool doesn't exist" - else: - result = "Load balancer doesn't Exist" - return changed, result - - def ensure_lbpool_nodes_absent(self, alias, location, name, port, nodes): - """ - Checks to see if the provided list of nodes exist for the pool and removes them if found any - :param 
alias: The account alias - :param location: the datacenter the load balancer resides in - :param name: the name of the load balancer - :param port: the port that the load balancer will listen on - :param nodes: the list of nodes to be removed - :return: (changed, result) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - lb_exists = self._loadbalancer_exists(name=name) - if lb_exists: - lb_id = self._get_loadbalancer_id(name=name) - pool_id = self._loadbalancerpool_exists( - alias=alias, - location=location, - port=port, - lb_id=lb_id) - if pool_id: - changed, result = self.remove_lbpool_nodes(alias=alias, - location=location, - lb_id=lb_id, - pool_id=pool_id, - nodes_to_remove=nodes) - else: - result = "Pool doesn't exist" - else: - result = "Load balancer doesn't Exist" - return changed, result - - def create_loadbalancer(self, name, alias, location, description, status): - """ - Create a loadbalancer w/ params - :param name: Name of loadbalancer - :param alias: Alias of account - :param location: Datacenter - :param description: Description for loadbalancer to be created - :param status: Enabled / Disabled - :return: result: The result from the CLC API call - """ - result = None - try: - result = self.clc.v2.API.Call('POST', - '/v2/sharedLoadBalancers/%s/%s' % (alias, - location), - json.dumps({"name": name, - "description": description, - "status": status})) - sleep(1) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to create load balancer "{0}". 
{1}'.format( - name, str(e.response_text))) - return result - - def create_loadbalancerpool( - self, alias, location, lb_id, method, persistence, port): - """ - Creates a pool on the provided load balancer - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param method: the load balancing method - :param persistence: the load balancing persistence type - :param port: the port that the load balancer will listen on - :return: result: The result from the create API call - """ - result = None - try: - result = self.clc.v2.API.Call( - 'POST', '/v2/sharedLoadBalancers/%s/%s/%s/pools' % - (alias, location, lb_id), json.dumps( - { - "port": port, "method": method, "persistence": persistence - })) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to create pool for load balancer id "{0}". {1}'.format( - lb_id, str(e.response_text))) - return result - - def delete_loadbalancer(self, alias, location, name): - """ - Delete CLC loadbalancer - :param alias: Alias for account - :param location: Datacenter - :param name: Name of the loadbalancer to delete - :return: result: The result from the CLC API call - """ - result = None - lb_id = self._get_loadbalancer_id(name=name) - try: - result = self.clc.v2.API.Call( - 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s' % - (alias, location, lb_id)) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to delete load balancer "{0}". 
{1}'.format( - name, str(e.response_text))) - return result - - def delete_loadbalancerpool(self, alias, location, lb_id, pool_id): - """ - Delete the pool on the provided load balancer - :param alias: The account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the load balancer pool - :return: result: The result from the delete API call - """ - result = None - try: - result = self.clc.v2.API.Call( - 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s' % - (alias, location, lb_id, pool_id)) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to delete pool for load balancer id "{0}". {1}'.format( - lb_id, str(e.response_text))) - return result - - def _get_loadbalancer_id(self, name): - """ - Retrieves unique ID of loadbalancer - :param name: Name of loadbalancer - :return: Unique ID of the loadbalancer - """ - id = None - for lb in self.lb_dict: - if lb.get('name') == name: - id = lb.get('id') - return id - - def _get_loadbalancer_list(self, alias, location): - """ - Retrieve a list of loadbalancers - :param alias: Alias for account - :param location: Datacenter - :return: JSON data for all loadbalancers at datacenter - """ - result = None - try: - result = self.clc.v2.API.Call( - 'GET', '/v2/sharedLoadBalancers/%s/%s' % (alias, location)) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to fetch load balancers for account: {0}. 
{1}'.format( - alias, str(e.response_text))) - return result - - def _loadbalancer_exists(self, name): - """ - Verify a loadbalancer exists - :param name: Name of loadbalancer - :return: False or the ID of the existing loadbalancer - """ - result = False - - for lb in self.lb_dict: - if lb.get('name') == name: - result = lb.get('id') - return result - - def _loadbalancerpool_exists(self, alias, location, port, lb_id): - """ - Checks to see if a pool exists on the specified port on the provided load balancer - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param port: the port to check and see if it exists - :param lb_id: the id string of the provided load balancer - :return: result: The id string of the pool or False - """ - result = False - try: - pool_list = self.clc.v2.API.Call( - 'GET', '/v2/sharedLoadBalancers/%s/%s/%s/pools' % - (alias, location, lb_id)) - except APIFailedResponse as e: - return self.module.fail_json( - msg='Unable to fetch the load balancer pools for for load balancer id: {0}. 
{1}'.format( - lb_id, str(e.response_text))) - for pool in pool_list: - if int(pool.get('port')) == int(port): - result = pool.get('id') - return result - - def _loadbalancerpool_nodes_exists( - self, alias, location, lb_id, pool_id, nodes_to_check): - """ - Checks to see if a set of nodes exists on the specified port on the provided load balancer - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the provided load balancer - :param pool_id: the id string of the load balancer pool - :param nodes_to_check: the list of nodes to check for - :return: result: True / False indicating if the given nodes exist - """ - result = False - nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id) - for node in nodes_to_check: - if not node.get('status'): - node['status'] = 'enabled' - if node in nodes: - result = True - else: - result = False - return result - - def set_loadbalancernodes(self, alias, location, lb_id, pool_id, nodes): - """ - Updates nodes to the provided pool - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the pool - :param nodes: a list of dictionaries containing the nodes to set - :return: result: The result from the CLC API call - """ - result = None - if not lb_id: - return result - if not self.module.check_mode: - try: - result = self.clc.v2.API.Call('PUT', - '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes' - % (alias, location, lb_id, pool_id), json.dumps(nodes)) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to set nodes for the load balancer pool id "{0}". 
{1}'.format( - pool_id, str(e.response_text))) - return result - - def add_lbpool_nodes(self, alias, location, lb_id, pool_id, nodes_to_add): - """ - Add nodes to the provided pool - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the pool - :param nodes_to_add: a list of dictionaries containing the nodes to add - :return: (changed, result) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - result = {} - nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id) - for node in nodes_to_add: - if not node.get('status'): - node['status'] = 'enabled' - if node not in nodes: - changed = True - nodes.append(node) - if changed is True and not self.module.check_mode: - result = self.set_loadbalancernodes( - alias, - location, - lb_id, - pool_id, - nodes) - return changed, result - - def remove_lbpool_nodes( - self, alias, location, lb_id, pool_id, nodes_to_remove): - """ - Removes nodes from the provided pool - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the pool - :param nodes_to_remove: a list of dictionaries containing the nodes to remove - :return: (changed, result) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - result = {} - nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id) - for node in nodes_to_remove: - if not node.get('status'): - node['status'] = 'enabled' - if node in nodes: - changed = True - nodes.remove(node) - if changed is True and not self.module.check_mode: - result = self.set_loadbalancernodes( - alias, - location, - lb_id, - pool_id, - nodes) - return changed, result - - def _get_lbpool_nodes(self, alias, location, lb_id, pool_id): - """ 
- Return the list of nodes available to the provided load balancer pool - :param alias: the account alias - :param location: the datacenter the load balancer resides in - :param lb_id: the id string of the load balancer - :param pool_id: the id string of the pool - :return: result: The list of nodes - """ - result = None - try: - result = self.clc.v2.API.Call('GET', - '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes' - % (alias, location, lb_id, pool_id)) - except APIFailedResponse as e: - self.module.fail_json( - msg='Unable to fetch list of available nodes for load balancer pool id: {0}. {1}'.format( - pool_id, str(e.response_text))) - return result - - @staticmethod - def define_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - name=dict(required=True), - description=dict(), - location=dict(required=True), - alias=dict(required=True), - port=dict(choices=[80, 443]), - method=dict(choices=['leastConnection', 'roundRobin']), - persistence=dict(choices=['standard', 'sticky']), - nodes=dict(type='list', default=[], elements='dict'), - status=dict(default='enabled', choices=['enabled', 'disabled']), - state=dict( - default='present', - choices=[ - 'present', - 'absent', - 'port_absent', - 'nodes_present', - 'nodes_absent']) - ) - return argument_spec - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username 
and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. - :return: none - """ - module = AnsibleModule(argument_spec=ClcLoadBalancer.define_argument_spec(), - supports_check_mode=True) - clc_loadbalancer = ClcLoadBalancer(module) - clc_loadbalancer.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/clc_modify_server.py b/plugins/modules/clc_modify_server.py deleted file mode 100644 index 786cdf2ae4..0000000000 --- a/plugins/modules/clc_modify_server.py +++ /dev/null @@ -1,968 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_modify_server -short_description: Modify servers in CenturyLink Cloud -description: - - An Ansible module to modify servers in CenturyLink Cloud. -options: - server_ids: - description: - - A list of server Ids to modify. - type: list - required: true - elements: str - cpu: - description: - - How many CPUs to update on the server - type: str - memory: - description: - - Memory (in GB) to set to the server. 
- type: str - anti_affinity_policy_id: - description: - - The anti affinity policy id to be set for a hyper scale server. - This is mutually exclusive with 'anti_affinity_policy_name' - type: str - anti_affinity_policy_name: - description: - - The anti affinity policy name to be set for a hyper scale server. - This is mutually exclusive with 'anti_affinity_policy_id' - type: str - alert_policy_id: - description: - - The alert policy id to be associated to the server. - This is mutually exclusive with 'alert_policy_name' - type: str - alert_policy_name: - description: - - The alert policy name to be associated to the server. - This is mutually exclusive with 'alert_policy_id' - type: str - state: - description: - - The state to insure that the provided resources are in. - type: str - default: 'present' - choices: ['present', 'absent'] - wait: - description: - - Whether to wait for the provisioning tasks to finish before returning. - type: bool - default: true -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
-''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: Set the cpu count to 4 on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - cpu: 4 - state: present - -- name: Set the memory to 8GB on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - memory: 8 - state: present - -- name: Set the anti affinity policy on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - anti_affinity_policy_name: 'aa_policy' - state: present - -- name: Remove the anti affinity policy on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - anti_affinity_policy_name: 'aa_policy' - state: absent - -- name: Add the alert policy on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - alert_policy_name: 'alert_policy' - state: present - -- name: Remove the alert policy on a server - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - alert_policy_name: 'alert_policy' - state: absent - -- name: Ret the memory to 16GB and cpu to 8 core on a lust if servers - community.general.clc_modify_server: - server_ids: - - UC1TESTSVR01 - - UC1TESTSVR02 - cpu: 8 - memory: 16 - state: present -''' - -RETURN = ''' -server_ids: - description: The list of server ids that are changed - returned: success - type: list - sample: - [ - "UC1TEST-SVR01", - "UC1TEST-SVR02" - ] -servers: - description: The list of server objects that are changed - returned: success - type: list - sample: - [ - { - "changeInfo":{ - "createdBy":"service.wfad", - "createdDate":1438196820, - "modifiedBy":"service.wfad", - "modifiedDate":1438196820 - }, - "description":"test-server", - "details":{ - "alertPolicies":[ - - ], - "cpu":1, - "customFields":[ - - ], - "diskCount":3, - 
"disks":[ - { - "id":"0:0", - "partitionPaths":[ - - ], - "sizeGB":1 - }, - { - "id":"0:1", - "partitionPaths":[ - - ], - "sizeGB":2 - }, - { - "id":"0:2", - "partitionPaths":[ - - ], - "sizeGB":14 - } - ], - "hostName":"", - "inMaintenanceMode":false, - "ipAddresses":[ - { - "internal":"10.1.1.1" - } - ], - "memoryGB":1, - "memoryMB":1024, - "partitions":[ - - ], - "powerState":"started", - "snapshots":[ - - ], - "storageGB":17 - }, - "groupId":"086ac1dfe0b6411989e8d1b77c4065f0", - "id":"test-server", - "ipaddress":"10.120.45.23", - "isTemplate":false, - "links":[ - { - "href":"/v2/servers/wfad/test-server", - "id":"test-server", - "rel":"self", - "verbs":[ - "GET", - "PATCH", - "DELETE" - ] - }, - { - "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0", - "id":"086ac1dfe0b6411989e8d1b77c4065f0", - "rel":"group" - }, - { - "href":"/v2/accounts/wfad", - "id":"wfad", - "rel":"account" - }, - { - "href":"/v2/billing/wfad/serverPricing/test-server", - "rel":"billing" - }, - { - "href":"/v2/servers/wfad/test-server/publicIPAddresses", - "rel":"publicIPAddresses", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/servers/wfad/test-server/credentials", - "rel":"credentials" - }, - { - "href":"/v2/servers/wfad/test-server/statistics", - "rel":"statistics" - }, - { - "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities", - "rel":"upcomingScheduledActivities" - }, - { - "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities", - "rel":"scheduledActivities", - "verbs":[ - "GET", - "POST" - ] - }, - { - "href":"/v2/servers/wfad/test-server/capabilities", - "rel":"capabilities" - }, - { - "href":"/v2/servers/wfad/test-server/alertPolicies", - "rel":"alertPolicyMappings", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/servers/wfad/test-server/antiAffinityPolicy", - "rel":"antiAffinityPolicyMapping", - "verbs":[ - "PUT", - "DELETE" - ] - }, - { - "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy", - 
"rel":"cpuAutoscalePolicyMapping", - "verbs":[ - "PUT", - "DELETE" - ] - } - ], - "locationId":"UC1", - "name":"test-server", - "os":"ubuntu14_64Bit", - "osType":"Ubuntu 14 64-bit", - "status":"active", - "storageType":"standard", - "type":"standard" - } - ] -''' - -__version__ = '${version}' - -import json -import os -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. -# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException - from clc import APIFailedResponse -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcModifyServer: - clc = clc_sdk - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - self._set_clc_credentials_from_env() - - p = self.module.params - cpu = p.get('cpu') - memory = p.get('memory') - state = p.get('state') - if state == 'absent' and (cpu or memory): - return self.module.fail_json( - msg='\'absent\' state is not supported for \'cpu\' and \'memory\' arguments') 
- - server_ids = p['server_ids'] - if not isinstance(server_ids, list): - return self.module.fail_json( - msg='server_ids needs to be a list of instances to modify: %s' % - server_ids) - - (changed, server_dict_array, changed_server_ids) = self._modify_servers( - server_ids=server_ids) - - self.module.exit_json( - changed=changed, - server_ids=changed_server_ids, - servers=server_dict_array) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - server_ids=dict(type='list', required=True, elements='str'), - state=dict(default='present', choices=['present', 'absent']), - cpu=dict(), - memory=dict(), - anti_affinity_policy_id=dict(), - anti_affinity_policy_name=dict(), - alert_policy_id=dict(), - alert_policy_name=dict(), - wait=dict(type='bool', default=True) - ) - mutually_exclusive = [ - ['anti_affinity_policy_id', 'anti_affinity_policy_name'], - ['alert_policy_id', 'alert_policy_name'] - ] - return {"argument_spec": argument_spec, - "mutually_exclusive": mutually_exclusive} - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") 
- - def _get_servers_from_clc(self, server_list, message): - """ - Internal function to fetch list of CLC server objects from a list of server ids - :param server_list: The list of server ids - :param message: the error message to throw in case of any error - :return the list of CLC server objects - """ - try: - return self.clc.v2.Servers(server_list).servers - except CLCException as ex: - return self.module.fail_json(msg=message + ': %s' % ex.message) - - def _modify_servers(self, server_ids): - """ - modify the servers configuration on the provided list - :param server_ids: list of servers to modify - :return: a list of dictionaries with server information about the servers that were modified - """ - p = self.module.params - state = p.get('state') - server_params = { - 'cpu': p.get('cpu'), - 'memory': p.get('memory'), - 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'), - 'anti_affinity_policy_name': p.get('anti_affinity_policy_name'), - 'alert_policy_id': p.get('alert_policy_id'), - 'alert_policy_name': p.get('alert_policy_name'), - } - changed = False - server_changed = False - aa_changed = False - ap_changed = False - server_dict_array = [] - result_server_ids = [] - request_list = [] - changed_servers = [] - - if not isinstance(server_ids, list) or len(server_ids) < 1: - return self.module.fail_json( - msg='server_ids should be a list of servers, aborting') - - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - for server in servers: - if state == 'present': - server_changed, server_result = self._ensure_server_config( - server, server_params) - if server_result: - request_list.append(server_result) - aa_changed = self._ensure_aa_policy_present( - server, - server_params) - ap_changed = self._ensure_alert_policy_present( - server, - server_params) - elif state == 'absent': - aa_changed = self._ensure_aa_policy_absent( - server, - server_params) - ap_changed = self._ensure_alert_policy_absent( - 
server, - server_params) - if server_changed or aa_changed or ap_changed: - changed_servers.append(server) - changed = True - - self._wait_for_requests(self.module, request_list) - self._refresh_servers(self.module, changed_servers) - - for server in changed_servers: - server_dict_array.append(server.data) - result_server_ids.append(server.id) - - return changed, server_dict_array, result_server_ids - - def _ensure_server_config( - self, server, server_params): - """ - ensures the server is updated with the provided cpu and memory - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - cpu = server_params.get('cpu') - memory = server_params.get('memory') - changed = False - result = None - - if not cpu: - cpu = server.cpu - if not memory: - memory = server.memory - if memory != server.memory or cpu != server.cpu: - if not self.module.check_mode: - result = self._modify_clc_server( - self.clc, - self.module, - server.id, - cpu, - memory) - changed = True - return changed, result - - @staticmethod - def _modify_clc_server(clc, module, server_id, cpu, memory): - """ - Modify the memory or CPU of a clc server. 
- :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param server_id: id of the server to modify - :param cpu: the new cpu value - :param memory: the new memory value - :return: the result of CLC API call - """ - result = None - acct_alias = clc.v2.Account.GetAlias() - try: - # Update the server configuration - job_obj = clc.v2.API.Call('PATCH', - 'servers/%s/%s' % (acct_alias, - server_id), - json.dumps([{"op": "set", - "member": "memory", - "value": memory}, - {"op": "set", - "member": "cpu", - "value": cpu}])) - result = clc.v2.Requests(job_obj) - except APIFailedResponse as ex: - module.fail_json( - msg='Unable to update the server configuration for server : "{0}". {1}'.format( - server_id, str(ex.response_text))) - return result - - @staticmethod - def _wait_for_requests(module, request_list): - """ - Block until server provisioning requests are completed. - :param module: the AnsibleModule object - :param request_list: a list of clc-sdk.Request instances - :return: none - """ - wait = module.params.get('wait') - if wait: - # Requests.WaitUntilComplete() returns the count of failed requests - failed_requests_count = sum( - [request.WaitUntilComplete() for request in request_list]) - - if failed_requests_count > 0: - module.fail_json( - msg='Unable to process modify server request') - - @staticmethod - def _refresh_servers(module, servers): - """ - Loop through a list of servers and refresh them. - :param module: the AnsibleModule object - :param servers: list of clc-sdk.Server instances to refresh - :return: none - """ - for server in servers: - try: - server.Refresh() - except CLCException as ex: - module.fail_json(msg='Unable to refresh the server {0}. 
{1}'.format( - server.id, ex.message - )) - - def _ensure_aa_policy_present( - self, server, server_params): - """ - ensures the server is updated with the provided anti affinity policy - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - acct_alias = self.clc.v2.Account.GetAlias() - - aa_policy_id = server_params.get('anti_affinity_policy_id') - aa_policy_name = server_params.get('anti_affinity_policy_name') - if not aa_policy_id and aa_policy_name: - aa_policy_id = self._get_aa_policy_id_by_name( - self.clc, - self.module, - acct_alias, - aa_policy_name) - current_aa_policy_id = self._get_aa_policy_id_of_server( - self.clc, - self.module, - acct_alias, - server.id) - - if aa_policy_id and aa_policy_id != current_aa_policy_id: - self._modify_aa_policy( - self.clc, - self.module, - acct_alias, - server.id, - aa_policy_id) - changed = True - return changed - - def _ensure_aa_policy_absent( - self, server, server_params): - """ - ensures the provided anti affinity policy is removed from the server - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - acct_alias = self.clc.v2.Account.GetAlias() - aa_policy_id = server_params.get('anti_affinity_policy_id') - aa_policy_name = server_params.get('anti_affinity_policy_name') - if not aa_policy_id and aa_policy_name: - aa_policy_id = self._get_aa_policy_id_by_name( - self.clc, - self.module, - acct_alias, - aa_policy_name) - current_aa_policy_id = self._get_aa_policy_id_of_server( - self.clc, - self.module, - acct_alias, - server.id) - - if aa_policy_id and aa_policy_id == current_aa_policy_id: - self._delete_aa_policy( - self.clc, - self.module, - 
acct_alias, - server.id) - changed = True - return changed - - @staticmethod - def _modify_aa_policy(clc, module, acct_alias, server_id, aa_policy_id): - """ - modifies the anti affinity policy of the CLC server - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param acct_alias: the CLC account alias - :param server_id: the CLC server id - :param aa_policy_id: the anti affinity policy id - :return: result: The result from the CLC API call - """ - result = None - if not module.check_mode: - try: - result = clc.v2.API.Call('PUT', - 'servers/%s/%s/antiAffinityPolicy' % ( - acct_alias, - server_id), - json.dumps({"id": aa_policy_id})) - except APIFailedResponse as ex: - module.fail_json( - msg='Unable to modify anti affinity policy to server : "{0}". {1}'.format( - server_id, str(ex.response_text))) - return result - - @staticmethod - def _delete_aa_policy(clc, module, acct_alias, server_id): - """ - Delete the anti affinity policy of the CLC server - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param acct_alias: the CLC account alias - :param server_id: the CLC server id - :return: result: The result from the CLC API call - """ - result = None - if not module.check_mode: - try: - result = clc.v2.API.Call('DELETE', - 'servers/%s/%s/antiAffinityPolicy' % ( - acct_alias, - server_id), - json.dumps({})) - except APIFailedResponse as ex: - module.fail_json( - msg='Unable to delete anti affinity policy to server : "{0}". 
{1}'.format( - server_id, str(ex.response_text))) - return result - - @staticmethod - def _get_aa_policy_id_by_name(clc, module, alias, aa_policy_name): - """ - retrieves the anti affinity policy id of the server based on the name of the policy - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the CLC account alias - :param aa_policy_name: the anti affinity policy name - :return: aa_policy_id: The anti affinity policy id - """ - aa_policy_id = None - try: - aa_policies = clc.v2.API.Call(method='GET', - url='antiAffinityPolicies/%s' % alias) - except APIFailedResponse as ex: - return module.fail_json( - msg='Unable to fetch anti affinity policies from account alias : "{0}". {1}'.format( - alias, str(ex.response_text))) - for aa_policy in aa_policies.get('items'): - if aa_policy.get('name') == aa_policy_name: - if not aa_policy_id: - aa_policy_id = aa_policy.get('id') - else: - return module.fail_json( - msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name) - if not aa_policy_id: - module.fail_json( - msg='No anti affinity policy was found with policy name : %s' % aa_policy_name) - return aa_policy_id - - @staticmethod - def _get_aa_policy_id_of_server(clc, module, alias, server_id): - """ - retrieves the anti affinity policy id of the server based on the CLC server id - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the CLC account alias - :param server_id: the CLC server id - :return: aa_policy_id: The anti affinity policy id - """ - aa_policy_id = None - try: - result = clc.v2.API.Call( - method='GET', url='servers/%s/%s/antiAffinityPolicy' % - (alias, server_id)) - aa_policy_id = result.get('id') - except APIFailedResponse as ex: - if ex.response_status_code != 404: - module.fail_json(msg='Unable to fetch anti affinity policy for server "{0}". 
{1}'.format( - server_id, str(ex.response_text))) - return aa_policy_id - - def _ensure_alert_policy_present( - self, server, server_params): - """ - ensures the server is updated with the provided alert policy - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - acct_alias = self.clc.v2.Account.GetAlias() - alert_policy_id = server_params.get('alert_policy_id') - alert_policy_name = server_params.get('alert_policy_name') - if not alert_policy_id and alert_policy_name: - alert_policy_id = self._get_alert_policy_id_by_name( - self.clc, - self.module, - acct_alias, - alert_policy_name) - if alert_policy_id and not self._alert_policy_exists( - server, alert_policy_id): - self._add_alert_policy_to_server( - self.clc, - self.module, - acct_alias, - server.id, - alert_policy_id) - changed = True - return changed - - def _ensure_alert_policy_absent( - self, server, server_params): - """ - ensures the alert policy is removed from the server - :param server: the CLC server object - :param server_params: the dictionary of server parameters - :return: (changed, group) - - changed: Boolean whether a change was made - result: The result from the CLC API call - """ - changed = False - - acct_alias = self.clc.v2.Account.GetAlias() - alert_policy_id = server_params.get('alert_policy_id') - alert_policy_name = server_params.get('alert_policy_name') - if not alert_policy_id and alert_policy_name: - alert_policy_id = self._get_alert_policy_id_by_name( - self.clc, - self.module, - acct_alias, - alert_policy_name) - - if alert_policy_id and self._alert_policy_exists( - server, alert_policy_id): - self._remove_alert_policy_to_server( - self.clc, - self.module, - acct_alias, - server.id, - alert_policy_id) - changed = True - return changed - - @staticmethod - def _add_alert_policy_to_server( - clc, 
module, acct_alias, server_id, alert_policy_id): - """ - add the alert policy to CLC server - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param acct_alias: the CLC account alias - :param server_id: the CLC server id - :param alert_policy_id: the alert policy id - :return: result: The result from the CLC API call - """ - result = None - if not module.check_mode: - try: - result = clc.v2.API.Call('POST', - 'servers/%s/%s/alertPolicies' % ( - acct_alias, - server_id), - json.dumps({"id": alert_policy_id})) - except APIFailedResponse as ex: - module.fail_json(msg='Unable to set alert policy to the server : "{0}". {1}'.format( - server_id, str(ex.response_text))) - return result - - @staticmethod - def _remove_alert_policy_to_server( - clc, module, acct_alias, server_id, alert_policy_id): - """ - remove the alert policy to the CLC server - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param acct_alias: the CLC account alias - :param server_id: the CLC server id - :param alert_policy_id: the alert policy id - :return: result: The result from the CLC API call - """ - result = None - if not module.check_mode: - try: - result = clc.v2.API.Call('DELETE', - 'servers/%s/%s/alertPolicies/%s' - % (acct_alias, server_id, alert_policy_id)) - except APIFailedResponse as ex: - module.fail_json(msg='Unable to remove alert policy from the server : "{0}". 
{1}'.format( - server_id, str(ex.response_text))) - return result - - @staticmethod - def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name): - """ - retrieves the alert policy id of the server based on the name of the policy - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the CLC account alias - :param alert_policy_name: the alert policy name - :return: alert_policy_id: The alert policy id - """ - alert_policy_id = None - try: - alert_policies = clc.v2.API.Call(method='GET', - url='alertPolicies/%s' % alias) - except APIFailedResponse as ex: - return module.fail_json(msg='Unable to fetch alert policies for account : "{0}". {1}'.format( - alias, str(ex.response_text))) - for alert_policy in alert_policies.get('items'): - if alert_policy.get('name') == alert_policy_name: - if not alert_policy_id: - alert_policy_id = alert_policy.get('id') - else: - return module.fail_json( - msg='multiple alert policies were found with policy name : %s' % alert_policy_name) - return alert_policy_id - - @staticmethod - def _alert_policy_exists(server, alert_policy_id): - """ - Checks if the alert policy exists for the server - :param server: the clc server object - :param alert_policy_id: the alert policy - :return: True: if the given alert policy id associated to the server, False otherwise - """ - result = False - alert_policies = server.alertPolicies - if alert_policies: - for alert_policy in alert_policies: - if alert_policy.get('id') == alert_policy_id: - result = True - return result - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. 
- :return: none - """ - - argument_dict = ClcModifyServer._define_module_argument_spec() - module = AnsibleModule(supports_check_mode=True, **argument_dict) - clc_modify_server = ClcModifyServer(module) - clc_modify_server.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/clc_publicip.py b/plugins/modules/clc_publicip.py deleted file mode 100644 index 5111b3cf19..0000000000 --- a/plugins/modules/clc_publicip.py +++ /dev/null @@ -1,362 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_publicip -short_description: Add and Delete public ips on servers in CenturyLink Cloud -description: - - An Ansible module to add or delete public ip addresses on an existing server or servers in CenturyLink Cloud. -options: - protocol: - description: - - The protocol that the public IP will listen for. - type: str - default: TCP - choices: ['TCP', 'UDP', 'ICMP'] - ports: - description: - - A list of ports to expose. This is required when state is 'present' - type: list - elements: int - server_ids: - description: - - A list of servers to create public ips on. - type: list - required: true - elements: str - state: - description: - - Determine whether to create or delete public IPs. If present module will not create a second public ip if one - already exists. - type: str - default: present - choices: ['present', 'absent'] - wait: - description: - - Whether to wait for the tasks to finish before returning. 
- type: bool - default: true -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. -''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: Add Public IP to Server - hosts: localhost - gather_facts: false - connection: local - tasks: - - name: Create Public IP For Servers - community.general.clc_publicip: - protocol: TCP - ports: - - 80 - server_ids: - - UC1TEST-SVR01 - - UC1TEST-SVR02 - state: present - register: clc - - - name: Debug - ansible.builtin.debug: - var: clc - -- name: Delete Public IP from Server - hosts: localhost - gather_facts: false - connection: local - tasks: - - name: Create Public IP For Servers - community.general.clc_publicip: - server_ids: - - UC1TEST-SVR01 - - UC1TEST-SVR02 - state: absent - register: clc - - - name: Debug - ansible.builtin.debug: - var: clc -''' - -RETURN = ''' -server_ids: - description: The list of server ids that are changed - returned: success - type: list - sample: - [ - "UC1TEST-SVR01", - "UC1TEST-SVR02" - ] -''' - -__version__ = '${version}' - -import os -import traceback - -from 
ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. -# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcPublicIp(object): - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - self._set_clc_credentials_from_env() - params = self.module.params - server_ids = params['server_ids'] - ports = params['ports'] - protocol = params['protocol'] - state = params['state'] - - if state == 'present': - changed, changed_server_ids, requests = self.ensure_public_ip_present( - server_ids=server_ids, protocol=protocol, ports=ports) - elif state == 'absent': - changed, changed_server_ids, requests = self.ensure_public_ip_absent( - server_ids=server_ids) - else: - return self.module.fail_json(msg="Unknown State: " + state) - self._wait_for_requests_to_complete(requests) - return self.module.exit_json(changed=changed, - server_ids=changed_server_ids) - - 
@staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - server_ids=dict(type='list', required=True, elements='str'), - protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']), - ports=dict(type='list', elements='int'), - wait=dict(type='bool', default=True), - state=dict(default='present', choices=['present', 'absent']), - ) - return argument_spec - - def ensure_public_ip_present(self, server_ids, protocol, ports): - """ - Ensures the given server ids having the public ip available - :param server_ids: the list of server ids - :param protocol: the ip protocol - :param ports: the list of ports to expose - :return: (changed, changed_server_ids, results) - changed: A flag indicating if there is any change - changed_server_ids : the list of server ids that are changed - results: The result list from clc public ip call - """ - changed = False - results = [] - changed_server_ids = [] - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - servers_to_change = [ - server for server in servers if len( - server.PublicIPs().public_ips) == 0] - ports_to_expose = [{'protocol': protocol, 'port': port} - for port in ports] - for server in servers_to_change: - if not self.module.check_mode: - result = self._add_publicip_to_server(server, ports_to_expose) - results.append(result) - changed_server_ids.append(server.id) - changed = True - return changed, changed_server_ids, results - - def _add_publicip_to_server(self, server, ports_to_expose): - result = None - try: - result = server.PublicIPs().Add(ports_to_expose) - except CLCException as ex: - self.module.fail_json(msg='Failed to add public ip to the server : {0}. 
{1}'.format( - server.id, ex.response_text - )) - return result - - def ensure_public_ip_absent(self, server_ids): - """ - Ensures the given server ids having the public ip removed if there is any - :param server_ids: the list of server ids - :return: (changed, changed_server_ids, results) - changed: A flag indicating if there is any change - changed_server_ids : the list of server ids that are changed - results: The result list from clc public ip call - """ - changed = False - results = [] - changed_server_ids = [] - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - servers_to_change = [ - server for server in servers if len( - server.PublicIPs().public_ips) > 0] - for server in servers_to_change: - if not self.module.check_mode: - result = self._remove_publicip_from_server(server) - results.append(result) - changed_server_ids.append(server.id) - changed = True - return changed, changed_server_ids, results - - def _remove_publicip_from_server(self, server): - result = None - try: - for ip_address in server.PublicIPs().public_ips: - result = ip_address.Delete() - except CLCException as ex: - self.module.fail_json(msg='Failed to remove public ip from the server : {0}. 
{1}'.format( - server.id, ex.response_text - )) - return result - - def _wait_for_requests_to_complete(self, requests_lst): - """ - Waits until the CLC requests are complete if the wait argument is True - :param requests_lst: The list of CLC request objects - :return: none - """ - if not self.module.params['wait']: - return - for request in requests_lst: - request.WaitUntilComplete() - for request_details in request.requests: - if request_details.Status() != 'succeeded': - self.module.fail_json( - msg='Unable to process public ip request') - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - def _get_servers_from_clc(self, server_ids, message): - """ - Gets list of servers form CLC api - """ - try: - return self.clc.v2.Servers(server_ids).servers - except CLCException as exception: - self.module.fail_json(msg=message + ': %s' % exception) - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - 
""" - The main function. Instantiates the module and calls process_request. - :return: none - """ - module = AnsibleModule( - argument_spec=ClcPublicIp._define_module_argument_spec(), - supports_check_mode=True - ) - clc_public_ip = ClcPublicIp(module) - clc_public_ip.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/clc_server.py b/plugins/modules/clc_server.py deleted file mode 100644 index d8e4f16217..0000000000 --- a/plugins/modules/clc_server.py +++ /dev/null @@ -1,1563 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_server -short_description: Create, Delete, Start and Stop servers in CenturyLink Cloud -description: - - An Ansible module to Create, Delete, Start and Stop servers in CenturyLink Cloud. -options: - additional_disks: - description: - - The list of additional disks for the server - type: list - elements: dict - default: [] - add_public_ip: - description: - - Whether to add a public ip to the server - type: bool - default: false - alias: - description: - - The account alias to provision the servers under. - type: str - anti_affinity_policy_id: - description: - - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_name'. - type: str - anti_affinity_policy_name: - description: - - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_id'. - type: str - alert_policy_id: - description: - - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_name'. - type: str - alert_policy_name: - description: - - The alert policy to assign to the server. 
This is mutually exclusive with 'alert_policy_id'. - type: str - count: - description: - - The number of servers to build (mutually exclusive with exact_count) - default: 1 - type: int - count_group: - description: - - Required when exact_count is specified. The Server Group use to determine how many servers to deploy. - type: str - cpu: - description: - - How many CPUs to provision on the server - default: 1 - type: int - cpu_autoscale_policy_id: - description: - - The autoscale policy to assign to the server. - type: str - custom_fields: - description: - - The list of custom fields to set on the server. - type: list - default: [] - elements: dict - description: - description: - - The description to set for the server. - type: str - exact_count: - description: - - Run in idempotent mode. Will insure that this exact number of servers are running in the provided group, - creating and deleting them to reach that count. Requires count_group to be set. - type: int - group: - description: - - The Server Group to create servers under. - type: str - default: 'Default Group' - ip_address: - description: - - The IP Address for the server. One is assigned if not provided. - type: str - location: - description: - - The Datacenter to create servers in. - type: str - managed_os: - description: - - Whether to create the server as 'Managed' or not. - type: bool - default: false - required: false - memory: - description: - - Memory in GB. - type: int - default: 1 - name: - description: - - A 1 to 6 character identifier to use for the server. This is required when state is 'present' - type: str - network_id: - description: - - The network UUID on which to create servers. - type: str - packages: - description: - - The list of blue print packages to run on the server after its created. - type: list - elements: dict - default: [] - password: - description: - - Password for the administrator / root user - type: str - primary_dns: - description: - - Primary DNS used by the server. 
- type: str - public_ip_protocol: - description: - - The protocol to use for the public ip if add_public_ip is set to True. - type: str - default: 'TCP' - choices: ['TCP', 'UDP', 'ICMP'] - public_ip_ports: - description: - - A list of ports to allow on the firewall to the servers public ip, if add_public_ip is set to True. - type: list - elements: dict - default: [] - secondary_dns: - description: - - Secondary DNS used by the server. - type: str - server_ids: - description: - - Required for started, stopped, and absent states. - A list of server Ids to insure are started, stopped, or absent. - type: list - default: [] - elements: str - source_server_password: - description: - - The password for the source server if a clone is specified. - type: str - state: - description: - - The state to insure that the provided resources are in. - type: str - default: 'present' - choices: ['present', 'absent', 'started', 'stopped'] - storage_type: - description: - - The type of storage to attach to the server. - type: str - default: 'standard' - choices: ['standard', 'hyperscale'] - template: - description: - - The template to use for server creation. Will search for a template if a partial string is provided. - This is required when state is 'present' - type: str - ttl: - description: - - The time to live for the server in seconds. The server will be deleted when this time expires. - type: str - type: - description: - - The type of server to create. - type: str - default: 'standard' - choices: ['standard', 'hyperscale', 'bareMetal'] - configuration_id: - description: - - Only required for bare metal servers. - Specifies the identifier for the specific configuration type of bare metal server to deploy. - type: str - os_type: - description: - - Only required for bare metal servers. - Specifies the OS to provision with the bare metal server. 
- type: str - choices: ['redHat6_64Bit', 'centOS6_64Bit', 'windows2012R2Standard_64Bit', 'ubuntu14_64Bit'] - wait: - description: - - Whether to wait for the provisioning tasks to finish before returning. - type: bool - default: true -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. 
-''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: Provision a single Ubuntu Server - community.general.clc_server: - name: test - template: ubuntu-14-64 - count: 1 - group: Default Group - state: present - -- name: Ensure 'Default Group' has exactly 5 servers - community.general.clc_server: - name: test - template: ubuntu-14-64 - exact_count: 5 - count_group: Default Group - group: Default Group - -- name: Stop a Server - community.general.clc_server: - server_ids: - - UC1ACCT-TEST01 - state: stopped - -- name: Start a Server - community.general.clc_server: - server_ids: - - UC1ACCT-TEST01 - state: started - -- name: Delete a Server - community.general.clc_server: - server_ids: - - UC1ACCT-TEST01 - state: absent -''' - -RETURN = ''' -server_ids: - description: The list of server ids that are created - returned: success - type: list - sample: - [ - "UC1TEST-SVR01", - "UC1TEST-SVR02" - ] -partially_created_server_ids: - description: The list of server ids that are partially created - returned: success - type: list - sample: - [ - "UC1TEST-SVR01", - "UC1TEST-SVR02" - ] -servers: - description: The list of server objects returned from CLC - returned: success - type: list - sample: - [ - { - "changeInfo":{ - "createdBy":"service.wfad", - "createdDate":1438196820, - "modifiedBy":"service.wfad", - "modifiedDate":1438196820 - }, - "description":"test-server", - "details":{ - "alertPolicies":[ - - ], - "cpu":1, - "customFields":[ - - ], - "diskCount":3, - "disks":[ - { - "id":"0:0", - "partitionPaths":[ - - ], - "sizeGB":1 - }, - { - "id":"0:1", - "partitionPaths":[ - - ], - "sizeGB":2 - }, - { - "id":"0:2", - "partitionPaths":[ - - ], - "sizeGB":14 - } - ], - "hostName":"", - "inMaintenanceMode":false, - "ipAddresses":[ - { - "internal":"10.1.1.1" - } - ], - "memoryGB":1, - "memoryMB":1024, - "partitions":[ - - ], - "powerState":"started", - "snapshots":[ - - ], - 
"storageGB":17 - }, - "groupId":"086ac1dfe0b6411989e8d1b77c4065f0", - "id":"test-server", - "ipaddress":"10.120.45.23", - "isTemplate":false, - "links":[ - { - "href":"/v2/servers/wfad/test-server", - "id":"test-server", - "rel":"self", - "verbs":[ - "GET", - "PATCH", - "DELETE" - ] - }, - { - "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0", - "id":"086ac1dfe0b6411989e8d1b77c4065f0", - "rel":"group" - }, - { - "href":"/v2/accounts/wfad", - "id":"wfad", - "rel":"account" - }, - { - "href":"/v2/billing/wfad/serverPricing/test-server", - "rel":"billing" - }, - { - "href":"/v2/servers/wfad/test-server/publicIPAddresses", - "rel":"publicIPAddresses", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/servers/wfad/test-server/credentials", - "rel":"credentials" - }, - { - "href":"/v2/servers/wfad/test-server/statistics", - "rel":"statistics" - }, - { - "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities", - "rel":"upcomingScheduledActivities" - }, - { - "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities", - "rel":"scheduledActivities", - "verbs":[ - "GET", - "POST" - ] - }, - { - "href":"/v2/servers/wfad/test-server/capabilities", - "rel":"capabilities" - }, - { - "href":"/v2/servers/wfad/test-server/alertPolicies", - "rel":"alertPolicyMappings", - "verbs":[ - "POST" - ] - }, - { - "href":"/v2/servers/wfad/test-server/antiAffinityPolicy", - "rel":"antiAffinityPolicyMapping", - "verbs":[ - "PUT", - "DELETE" - ] - }, - { - "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy", - "rel":"cpuAutoscalePolicyMapping", - "verbs":[ - "PUT", - "DELETE" - ] - } - ], - "locationId":"UC1", - "name":"test-server", - "os":"ubuntu14_64Bit", - "osType":"Ubuntu 14 64-bit", - "status":"active", - "storageType":"standard", - "type":"standard" - } - ] -''' - -__version__ = '${version}' - -import json -import os -import time -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import 
LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. -# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException - from clc import APIFailedResponse -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcServer: - clc = clc_sdk - - def __init__(self, module): - """ - Construct module - """ - self.clc = clc_sdk - self.module = module - self.group_dict = {} - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - changed = False - new_server_ids = [] - server_dict_array = [] - - self._set_clc_credentials_from_env() - self.module.params = self._validate_module_params( - self.clc, - self.module) - p = self.module.params - state = p.get('state') - - # - # Handle each state - # - partial_servers_ids = [] - if state == 'absent': - server_ids = p['server_ids'] - if not isinstance(server_ids, list): - return self.module.fail_json( - msg='server_ids needs to be a list of instances to delete: %s' % - server_ids) - - (changed, - server_dict_array, - new_server_ids) = self._delete_servers(module=self.module, - clc=self.clc, - server_ids=server_ids) - - elif state in ('started', 'stopped'): - server_ids = 
p.get('server_ids') - if not isinstance(server_ids, list): - return self.module.fail_json( - msg='server_ids needs to be a list of servers to run: %s' % - server_ids) - - (changed, - server_dict_array, - new_server_ids) = self._start_stop_servers(self.module, - self.clc, - server_ids) - - elif state == 'present': - # Changed is always set to true when provisioning new instances - if not p.get('template') and p.get('type') != 'bareMetal': - return self.module.fail_json( - msg='template parameter is required for new instance') - - if p.get('exact_count') is None: - (server_dict_array, - new_server_ids, - partial_servers_ids, - changed) = self._create_servers(self.module, - self.clc) - else: - (server_dict_array, - new_server_ids, - partial_servers_ids, - changed) = self._enforce_count(self.module, - self.clc) - - self.module.exit_json( - changed=changed, - server_ids=new_server_ids, - partially_created_server_ids=partial_servers_ids, - servers=server_dict_array) - - @staticmethod - def _define_module_argument_spec(): - """ - Define the argument spec for the ansible module - :return: argument spec dictionary - """ - argument_spec = dict( - name=dict(), - template=dict(), - group=dict(default='Default Group'), - network_id=dict(), - location=dict(), - cpu=dict(default=1, type='int'), - memory=dict(default=1, type='int'), - alias=dict(), - password=dict(no_log=True), - ip_address=dict(), - storage_type=dict( - default='standard', - choices=[ - 'standard', - 'hyperscale']), - type=dict(default='standard', choices=['standard', 'hyperscale', 'bareMetal']), - primary_dns=dict(), - secondary_dns=dict(), - additional_disks=dict(type='list', default=[], elements='dict'), - custom_fields=dict(type='list', default=[], elements='dict'), - ttl=dict(), - managed_os=dict(type='bool', default=False), - description=dict(), - source_server_password=dict(no_log=True), - cpu_autoscale_policy_id=dict(), - anti_affinity_policy_id=dict(), - anti_affinity_policy_name=dict(), - 
alert_policy_id=dict(), - alert_policy_name=dict(), - packages=dict(type='list', default=[], elements='dict'), - state=dict( - default='present', - choices=[ - 'present', - 'absent', - 'started', - 'stopped']), - count=dict(type='int', default=1), - exact_count=dict(type='int', ), - count_group=dict(), - server_ids=dict(type='list', default=[], elements='str'), - add_public_ip=dict(type='bool', default=False), - public_ip_protocol=dict( - default='TCP', - choices=[ - 'TCP', - 'UDP', - 'ICMP']), - public_ip_ports=dict(type='list', default=[], elements='dict'), - configuration_id=dict(), - os_type=dict(choices=[ - 'redHat6_64Bit', - 'centOS6_64Bit', - 'windows2012R2Standard_64Bit', - 'ubuntu14_64Bit' - ]), - wait=dict(type='bool', default=True)) - - mutually_exclusive = [ - ['exact_count', 'count'], - ['exact_count', 'state'], - ['anti_affinity_policy_id', 'anti_affinity_policy_name'], - ['alert_policy_id', 'alert_policy_name'], - ] - return {"argument_spec": argument_spec, - "mutually_exclusive": mutually_exclusive} - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - @staticmethod - def _validate_module_params(clc, module): - """ - Validate the 
module params, and lookup default values. - :param clc: clc-sdk instance to use - :param module: module to validate - :return: dictionary of validated params - """ - params = module.params - datacenter = ClcServer._find_datacenter(clc, module) - - ClcServer._validate_types(module) - ClcServer._validate_name(module) - - params['alias'] = ClcServer._find_alias(clc, module) - params['cpu'] = ClcServer._find_cpu(clc, module) - params['memory'] = ClcServer._find_memory(clc, module) - params['description'] = ClcServer._find_description(module) - params['ttl'] = ClcServer._find_ttl(clc, module) - params['template'] = ClcServer._find_template_id(module, datacenter) - params['group'] = ClcServer._find_group(module, datacenter).id - params['network_id'] = ClcServer._find_network_id(module, datacenter) - params['anti_affinity_policy_id'] = ClcServer._find_aa_policy_id( - clc, - module) - params['alert_policy_id'] = ClcServer._find_alert_policy_id( - clc, - module) - - return params - - @staticmethod - def _find_datacenter(clc, module): - """ - Find the datacenter by calling the CLC API. - :param clc: clc-sdk instance to use - :param module: module to validate - :return: clc-sdk.Datacenter instance - """ - location = module.params.get('location') - try: - if not location: - account = clc.v2.Account() - location = account.data.get('primaryDataCenter') - data_center = clc.v2.Datacenter(location) - return data_center - except CLCException: - module.fail_json(msg="Unable to find location: {0}".format(location)) - - @staticmethod - def _find_alias(clc, module): - """ - Find or Validate the Account Alias by calling the CLC API - :param clc: clc-sdk instance to use - :param module: module to validate - :return: clc-sdk.Account instance - """ - alias = module.params.get('alias') - if not alias: - try: - alias = clc.v2.Account.GetAlias() - except CLCException as ex: - module.fail_json(msg='Unable to find account alias. 
{0}'.format( - ex.message - )) - return alias - - @staticmethod - def _find_cpu(clc, module): - """ - Find or validate the CPU value by calling the CLC API - :param clc: clc-sdk instance to use - :param module: module to validate - :return: Int value for CPU - """ - cpu = module.params.get('cpu') - group_id = module.params.get('group_id') - alias = module.params.get('alias') - state = module.params.get('state') - - if not cpu and state == 'present': - group = clc.v2.Group(id=group_id, - alias=alias) - if group.Defaults("cpu"): - cpu = group.Defaults("cpu") - else: - module.fail_json( - msg=str("Can\'t determine a default cpu value. Please provide a value for cpu.")) - return cpu - - @staticmethod - def _find_memory(clc, module): - """ - Find or validate the Memory value by calling the CLC API - :param clc: clc-sdk instance to use - :param module: module to validate - :return: Int value for Memory - """ - memory = module.params.get('memory') - group_id = module.params.get('group_id') - alias = module.params.get('alias') - state = module.params.get('state') - - if not memory and state == 'present': - group = clc.v2.Group(id=group_id, - alias=alias) - if group.Defaults("memory"): - memory = group.Defaults("memory") - else: - module.fail_json(msg=str( - "Can\'t determine a default memory value. 
Please provide a value for memory.")) - return memory - - @staticmethod - def _find_description(module): - """ - Set the description module param to name if description is blank - :param module: the module to validate - :return: string description - """ - description = module.params.get('description') - if not description: - description = module.params.get('name') - return description - - @staticmethod - def _validate_types(module): - """ - Validate that type and storage_type are set appropriately, and fail if not - :param module: the module to validate - :return: none - """ - state = module.params.get('state') - server_type = module.params.get( - 'type').lower() if module.params.get('type') else None - storage_type = module.params.get( - 'storage_type').lower() if module.params.get('storage_type') else None - - if state == "present": - if server_type == "standard" and storage_type not in ( - "standard", "premium"): - module.fail_json( - msg=str("Standard VMs must have storage_type = 'standard' or 'premium'")) - - if server_type == "hyperscale" and storage_type != "hyperscale": - module.fail_json( - msg=str("Hyperscale VMs must have storage_type = 'hyperscale'")) - - @staticmethod - def _validate_name(module): - """ - Validate that name is the correct length if provided, fail if it's not - :param module: the module to validate - :return: none - """ - server_name = module.params.get('name') - state = module.params.get('state') - - if state == 'present' and ( - len(server_name) < 1 or len(server_name) > 6): - module.fail_json(msg=str( - "When state = 'present', name must be a string with a minimum length of 1 and a maximum length of 6")) - - @staticmethod - def _find_ttl(clc, module): - """ - Validate that TTL is > 3600 if set, and fail if not - :param clc: clc-sdk instance to use - :param module: module to validate - :return: validated ttl - """ - ttl = module.params.get('ttl') - - if ttl: - if ttl <= 3600: - return module.fail_json(msg=str("Ttl cannot be <= 3600")) 
- else: - ttl = clc.v2.time_utils.SecondsToZuluTS(int(time.time()) + ttl) - return ttl - - @staticmethod - def _find_template_id(module, datacenter): - """ - Find the template id by calling the CLC API. - :param module: the module to validate - :param datacenter: the datacenter to search for the template - :return: a valid clc template id - """ - lookup_template = module.params.get('template') - state = module.params.get('state') - type = module.params.get('type') - result = None - - if state == 'present' and type != 'bareMetal': - try: - result = datacenter.Templates().Search(lookup_template)[0].id - except CLCException: - module.fail_json( - msg=str( - "Unable to find a template: " + - lookup_template + - " in location: " + - datacenter.id)) - return result - - @staticmethod - def _find_network_id(module, datacenter): - """ - Validate the provided network id or return a default. - :param module: the module to validate - :param datacenter: the datacenter to search for a network id - :return: a valid network id - """ - network_id = module.params.get('network_id') - - if not network_id: - try: - network_id = datacenter.Networks().networks[0].id - # -- added for clc-sdk 2.23 compatibility - # datacenter_networks = clc_sdk.v2.Networks( - # networks_lst=datacenter._DeploymentCapabilities()['deployableNetworks']) - # network_id = datacenter_networks.networks[0].id - # -- end - except CLCException: - module.fail_json( - msg=str( - "Unable to find a network in location: " + - datacenter.id)) - - return network_id - - @staticmethod - def _find_aa_policy_id(clc, module): - """ - Validate if the anti affinity policy exist for the given name and throw error if not - :param clc: the clc-sdk instance - :param module: the module to validate - :return: aa_policy_id: the anti affinity policy id of the given name. 
- """ - aa_policy_id = module.params.get('anti_affinity_policy_id') - aa_policy_name = module.params.get('anti_affinity_policy_name') - if not aa_policy_id and aa_policy_name: - alias = module.params.get('alias') - aa_policy_id = ClcServer._get_anti_affinity_policy_id( - clc, - module, - alias, - aa_policy_name) - if not aa_policy_id: - module.fail_json( - msg='No anti affinity policy was found with policy name : %s' % aa_policy_name) - return aa_policy_id - - @staticmethod - def _find_alert_policy_id(clc, module): - """ - Validate if the alert policy exist for the given name and throw error if not - :param clc: the clc-sdk instance - :param module: the module to validate - :return: alert_policy_id: the alert policy id of the given name. - """ - alert_policy_id = module.params.get('alert_policy_id') - alert_policy_name = module.params.get('alert_policy_name') - if not alert_policy_id and alert_policy_name: - alias = module.params.get('alias') - alert_policy_id = ClcServer._get_alert_policy_id_by_name( - clc=clc, - module=module, - alias=alias, - alert_policy_name=alert_policy_name - ) - if not alert_policy_id: - module.fail_json( - msg='No alert policy exist with name : %s' % alert_policy_name) - return alert_policy_id - - def _create_servers(self, module, clc, override_count=None): - """ - Create New Servers in CLC cloud - :param module: the AnsibleModule object - :param clc: the clc-sdk instance to use - :return: a list of dictionaries with server information about the servers that were created - """ - p = module.params - request_list = [] - servers = [] - server_dict_array = [] - created_server_ids = [] - partial_created_servers_ids = [] - - add_public_ip = p.get('add_public_ip') - public_ip_protocol = p.get('public_ip_protocol') - public_ip_ports = p.get('public_ip_ports') - - params = { - 'name': p.get('name'), - 'template': p.get('template'), - 'group_id': p.get('group'), - 'network_id': p.get('network_id'), - 'cpu': p.get('cpu'), - 'memory': p.get('memory'), 
- 'alias': p.get('alias'), - 'password': p.get('password'), - 'ip_address': p.get('ip_address'), - 'storage_type': p.get('storage_type'), - 'type': p.get('type'), - 'primary_dns': p.get('primary_dns'), - 'secondary_dns': p.get('secondary_dns'), - 'additional_disks': p.get('additional_disks'), - 'custom_fields': p.get('custom_fields'), - 'ttl': p.get('ttl'), - 'managed_os': p.get('managed_os'), - 'description': p.get('description'), - 'source_server_password': p.get('source_server_password'), - 'cpu_autoscale_policy_id': p.get('cpu_autoscale_policy_id'), - 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'), - 'packages': p.get('packages'), - 'configuration_id': p.get('configuration_id'), - 'os_type': p.get('os_type') - } - - count = override_count if override_count else p.get('count') - - changed = False if count == 0 else True - - if not changed: - return server_dict_array, created_server_ids, partial_created_servers_ids, changed - for i in range(0, count): - if not module.check_mode: - req = self._create_clc_server(clc=clc, - module=module, - server_params=params) - server = req.requests[0].Server() - request_list.append(req) - servers.append(server) - - self._wait_for_requests(module, request_list) - self._refresh_servers(module, servers) - - ip_failed_servers = self._add_public_ip_to_servers( - module=module, - should_add_public_ip=add_public_ip, - servers=servers, - public_ip_protocol=public_ip_protocol, - public_ip_ports=public_ip_ports) - ap_failed_servers = self._add_alert_policy_to_servers(clc=clc, - module=module, - servers=servers) - - for server in servers: - if server in ip_failed_servers or server in ap_failed_servers: - partial_created_servers_ids.append(server.id) - else: - # reload server details - server = clc.v2.Server(server.id) - server.data['ipaddress'] = server.details[ - 'ipAddresses'][0]['internal'] - - if add_public_ip and len(server.PublicIPs().public_ips) > 0: - server.data['publicip'] = str( - server.PublicIPs().public_ips[0]) - 
created_server_ids.append(server.id) - server_dict_array.append(server.data) - - return server_dict_array, created_server_ids, partial_created_servers_ids, changed - - def _enforce_count(self, module, clc): - """ - Enforce that there is the right number of servers in the provided group. - Starts or stops servers as necessary. - :param module: the AnsibleModule object - :param clc: the clc-sdk instance to use - :return: a list of dictionaries with server information about the servers that were created or deleted - """ - p = module.params - changed = False - count_group = p.get('count_group') - datacenter = ClcServer._find_datacenter(clc, module) - exact_count = p.get('exact_count') - server_dict_array = [] - partial_servers_ids = [] - changed_server_ids = [] - - # fail here if the exact count was specified without filtering - # on a group, as this may lead to a undesired removal of instances - if exact_count and count_group is None: - return module.fail_json( - msg="you must use the 'count_group' option with exact_count") - - servers, running_servers = ClcServer._find_running_servers_by_group( - module, datacenter, count_group) - - if len(running_servers) == exact_count: - changed = False - - elif len(running_servers) < exact_count: - to_create = exact_count - len(running_servers) - server_dict_array, changed_server_ids, partial_servers_ids, changed \ - = self._create_servers(module, clc, override_count=to_create) - - for server in server_dict_array: - running_servers.append(server) - - elif len(running_servers) > exact_count: - to_remove = len(running_servers) - exact_count - all_server_ids = sorted([x.id for x in running_servers]) - remove_ids = all_server_ids[0:to_remove] - - (changed, server_dict_array, changed_server_ids) \ - = ClcServer._delete_servers(module, clc, remove_ids) - - return server_dict_array, changed_server_ids, partial_servers_ids, changed - - @staticmethod - def _wait_for_requests(module, request_list): - """ - Block until server provisioning 
requests are completed. - :param module: the AnsibleModule object - :param request_list: a list of clc-sdk.Request instances - :return: none - """ - wait = module.params.get('wait') - if wait: - # Requests.WaitUntilComplete() returns the count of failed requests - failed_requests_count = sum( - [request.WaitUntilComplete() for request in request_list]) - - if failed_requests_count > 0: - module.fail_json( - msg='Unable to process server request') - - @staticmethod - def _refresh_servers(module, servers): - """ - Loop through a list of servers and refresh them. - :param module: the AnsibleModule object - :param servers: list of clc-sdk.Server instances to refresh - :return: none - """ - for server in servers: - try: - server.Refresh() - except CLCException as ex: - module.fail_json(msg='Unable to refresh the server {0}. {1}'.format( - server.id, ex.message - )) - - @staticmethod - def _add_public_ip_to_servers( - module, - should_add_public_ip, - servers, - public_ip_protocol, - public_ip_ports): - """ - Create a public IP for servers - :param module: the AnsibleModule object - :param should_add_public_ip: boolean - whether or not to provision a public ip for servers. 
Skipped if False - :param servers: List of servers to add public ips to - :param public_ip_protocol: a protocol to allow for the public ips - :param public_ip_ports: list of ports to allow for the public ips - :return: none - """ - failed_servers = [] - if not should_add_public_ip: - return failed_servers - - ports_lst = [] - request_list = [] - server = None - - for port in public_ip_ports: - ports_lst.append( - {'protocol': public_ip_protocol, 'port': port}) - try: - if not module.check_mode: - for server in servers: - request = server.PublicIPs().Add(ports_lst) - request_list.append(request) - except APIFailedResponse: - failed_servers.append(server) - ClcServer._wait_for_requests(module, request_list) - return failed_servers - - @staticmethod - def _add_alert_policy_to_servers(clc, module, servers): - """ - Associate the alert policy to servers - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param servers: List of servers to add alert policy to - :return: failed_servers: the list of servers which failed while associating alert policy - """ - failed_servers = [] - p = module.params - alert_policy_id = p.get('alert_policy_id') - alias = p.get('alias') - - if alert_policy_id and not module.check_mode: - for server in servers: - try: - ClcServer._add_alert_policy_to_server( - clc=clc, - alias=alias, - server_id=server.id, - alert_policy_id=alert_policy_id) - except CLCException: - failed_servers.append(server) - return failed_servers - - @staticmethod - def _add_alert_policy_to_server( - clc, alias, server_id, alert_policy_id): - """ - Associate an alert policy to a clc server - :param clc: the clc-sdk instance to use - :param alias: the clc account alias - :param server_id: The clc server id - :param alert_policy_id: the alert policy id to be associated to the server - :return: none - """ - try: - clc.v2.API.Call( - method='POST', - url='servers/%s/%s/alertPolicies' % (alias, server_id), - payload=json.dumps( - { - 'id': 
alert_policy_id - })) - except APIFailedResponse as e: - raise CLCException( - 'Failed to associate alert policy to the server : {0} with Error {1}'.format( - server_id, str(e.response_text))) - - @staticmethod - def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name): - """ - Returns the alert policy id for the given alert policy name - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the clc account alias - :param alert_policy_name: the name of the alert policy - :return: alert_policy_id: the alert policy id - """ - alert_policy_id = None - policies = clc.v2.API.Call('GET', '/v2/alertPolicies/%s' % alias) - if not policies: - return alert_policy_id - for policy in policies.get('items'): - if policy.get('name') == alert_policy_name: - if not alert_policy_id: - alert_policy_id = policy.get('id') - else: - return module.fail_json( - msg='multiple alert policies were found with policy name : %s' % alert_policy_name) - return alert_policy_id - - @staticmethod - def _delete_servers(module, clc, server_ids): - """ - Delete the servers on the provided list - :param module: the AnsibleModule object - :param clc: the clc-sdk instance to use - :param server_ids: list of servers to delete - :return: a list of dictionaries with server information about the servers that were deleted - """ - terminated_server_ids = [] - server_dict_array = [] - request_list = [] - - if not isinstance(server_ids, list) or len(server_ids) < 1: - return module.fail_json( - msg='server_ids should be a list of servers, aborting') - - servers = clc.v2.Servers(server_ids).Servers() - for server in servers: - if not module.check_mode: - request_list.append(server.Delete()) - ClcServer._wait_for_requests(module, request_list) - - for server in servers: - terminated_server_ids.append(server.id) - - return True, server_dict_array, terminated_server_ids - - @staticmethod - def _start_stop_servers(module, clc, server_ids): - """ - Start or 
Stop the servers on the provided list - :param module: the AnsibleModule object - :param clc: the clc-sdk instance to use - :param server_ids: list of servers to start or stop - :return: a list of dictionaries with server information about the servers that were started or stopped - """ - p = module.params - state = p.get('state') - changed = False - changed_servers = [] - server_dict_array = [] - result_server_ids = [] - request_list = [] - - if not isinstance(server_ids, list) or len(server_ids) < 1: - return module.fail_json( - msg='server_ids should be a list of servers, aborting') - - servers = clc.v2.Servers(server_ids).Servers() - for server in servers: - if server.powerState != state: - changed_servers.append(server) - if not module.check_mode: - request_list.append( - ClcServer._change_server_power_state( - module, - server, - state)) - changed = True - - ClcServer._wait_for_requests(module, request_list) - ClcServer._refresh_servers(module, changed_servers) - - for server in set(changed_servers + servers): - try: - server.data['ipaddress'] = server.details[ - 'ipAddresses'][0]['internal'] - server.data['publicip'] = str( - server.PublicIPs().public_ips[0]) - except (KeyError, IndexError): - pass - - server_dict_array.append(server.data) - result_server_ids.append(server.id) - - return changed, server_dict_array, result_server_ids - - @staticmethod - def _change_server_power_state(module, server, state): - """ - Change the server powerState - :param module: the module to check for intended state - :param server: the server to start or stop - :param state: the intended powerState for the server - :return: the request object from clc-sdk call - """ - result = None - try: - if state == 'started': - result = server.PowerOn() - else: - # Try to shut down the server and fall back to power off when unable to shut down. 
- result = server.ShutDown() - if result and hasattr(result, 'requests') and result.requests[0]: - return result - else: - result = server.PowerOff() - except CLCException: - module.fail_json( - msg='Unable to change power state for server {0}'.format( - server.id)) - return result - - @staticmethod - def _find_running_servers_by_group(module, datacenter, count_group): - """ - Find a list of running servers in the provided group - :param module: the AnsibleModule object - :param datacenter: the clc-sdk.Datacenter instance to use to lookup the group - :param count_group: the group to count the servers - :return: list of servers, and list of running servers - """ - group = ClcServer._find_group( - module=module, - datacenter=datacenter, - lookup_group=count_group) - - servers = group.Servers().Servers() - running_servers = [] - - for server in servers: - if server.status == 'active' and server.powerState == 'started': - running_servers.append(server) - - return servers, running_servers - - @staticmethod - def _find_group(module, datacenter, lookup_group=None): - """ - Find a server group in a datacenter by calling the CLC API - :param module: the AnsibleModule instance - :param datacenter: clc-sdk.Datacenter instance to search for the group - :param lookup_group: string name of the group to search for - :return: clc-sdk.Group instance - """ - if not lookup_group: - lookup_group = module.params.get('group') - try: - return datacenter.Groups().Get(lookup_group) - except CLCException: - pass - - # The search above only acts on the main - result = ClcServer._find_group_recursive( - module, - datacenter.Groups(), - lookup_group) - - if result is None: - module.fail_json( - msg=str( - "Unable to find group: " + - lookup_group + - " in location: " + - datacenter.id)) - - return result - - @staticmethod - def _find_group_recursive(module, group_list, lookup_group): - """ - Find a server group by recursively walking the tree - :param module: the AnsibleModule instance to use 
- :param group_list: a list of groups to search - :param lookup_group: the group to look for - :return: list of groups - """ - result = None - for group in group_list.groups: - subgroups = group.Subgroups() - try: - return subgroups.Get(lookup_group) - except CLCException: - result = ClcServer._find_group_recursive( - module, - subgroups, - lookup_group) - - if result is not None: - break - - return result - - @staticmethod - def _create_clc_server( - clc, - module, - server_params): - """ - Call the CLC Rest API to Create a Server - :param clc: the clc-python-sdk instance to use - :param module: the AnsibleModule instance to use - :param server_params: a dictionary of params to use to create the servers - :return: clc-sdk.Request object linked to the queued server request - """ - - try: - res = clc.v2.API.Call( - method='POST', - url='servers/%s' % - (server_params.get('alias')), - payload=json.dumps( - { - 'name': server_params.get('name'), - 'description': server_params.get('description'), - 'groupId': server_params.get('group_id'), - 'sourceServerId': server_params.get('template'), - 'isManagedOS': server_params.get('managed_os'), - 'primaryDNS': server_params.get('primary_dns'), - 'secondaryDNS': server_params.get('secondary_dns'), - 'networkId': server_params.get('network_id'), - 'ipAddress': server_params.get('ip_address'), - 'password': server_params.get('password'), - 'sourceServerPassword': server_params.get('source_server_password'), - 'cpu': server_params.get('cpu'), - 'cpuAutoscalePolicyId': server_params.get('cpu_autoscale_policy_id'), - 'memoryGB': server_params.get('memory'), - 'type': server_params.get('type'), - 'storageType': server_params.get('storage_type'), - 'antiAffinityPolicyId': server_params.get('anti_affinity_policy_id'), - 'customFields': server_params.get('custom_fields'), - 'additionalDisks': server_params.get('additional_disks'), - 'ttl': server_params.get('ttl'), - 'packages': server_params.get('packages'), - 'configurationId': 
server_params.get('configuration_id'), - 'osType': server_params.get('os_type')})) - - result = clc.v2.Requests(res) - except APIFailedResponse as ex: - return module.fail_json(msg='Unable to create the server: {0}. {1}'.format( - server_params.get('name'), - ex.response_text - )) - - # - # Patch the Request object so that it returns a valid server - - # Find the server's UUID from the API response - server_uuid = [obj['id'] - for obj in res['links'] if obj['rel'] == 'self'][0] - - # Change the request server method to a _find_server_by_uuid closure so - # that it will work - result.requests[0].Server = lambda: ClcServer._find_server_by_uuid_w_retry( - clc, - module, - server_uuid, - server_params.get('alias')) - - return result - - @staticmethod - def _get_anti_affinity_policy_id(clc, module, alias, aa_policy_name): - """ - retrieves the anti affinity policy id of the server based on the name of the policy - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param alias: the CLC account alias - :param aa_policy_name: the anti affinity policy name - :return: aa_policy_id: The anti affinity policy id - """ - aa_policy_id = None - try: - aa_policies = clc.v2.API.Call(method='GET', - url='antiAffinityPolicies/%s' % alias) - except APIFailedResponse as ex: - return module.fail_json(msg='Unable to fetch anti affinity policies for account: {0}. 
{1}'.format( - alias, ex.response_text)) - for aa_policy in aa_policies.get('items'): - if aa_policy.get('name') == aa_policy_name: - if not aa_policy_id: - aa_policy_id = aa_policy.get('id') - else: - return module.fail_json( - msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name) - return aa_policy_id - - # - # This is the function that gets patched to the Request.server object using a lamda closure - # - - @staticmethod - def _find_server_by_uuid_w_retry( - clc, module, svr_uuid, alias=None, retries=5, back_out=2): - """ - Find the clc server by the UUID returned from the provisioning request. Retry the request if a 404 is returned. - :param clc: the clc-sdk instance to use - :param module: the AnsibleModule object - :param svr_uuid: UUID of the server - :param retries: the number of retry attempts to make prior to fail. default is 5 - :param alias: the Account Alias to search - :return: a clc-sdk.Server instance - """ - if not alias: - alias = clc.v2.Account.GetAlias() - - # Wait and retry if the api returns a 404 - while True: - retries -= 1 - try: - server_obj = clc.v2.API.Call( - method='GET', url='servers/%s/%s?uuid=true' % - (alias, svr_uuid)) - server_id = server_obj['id'] - server = clc.v2.Server( - id=server_id, - alias=alias, - server_obj=server_obj) - return server - - except APIFailedResponse as e: - if e.response_status_code != 404: - return module.fail_json( - msg='A failure response was received from CLC API when ' - 'attempting to get details for a server: UUID=%s, Code=%i, Message=%s' % - (svr_uuid, e.response_status_code, e.message)) - if retries == 0: - return module.fail_json( - msg='Unable to reach the CLC API after 5 attempts') - time.sleep(back_out) - back_out *= 2 - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] 
+= " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - The main function. Instantiates the module and calls process_request. - :return: none - """ - argument_dict = ClcServer._define_module_argument_spec() - module = AnsibleModule(supports_check_mode=True, **argument_dict) - clc_server = ClcServer(module) - clc_server.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/clc_server_snapshot.py b/plugins/modules/clc_server_snapshot.py deleted file mode 100644 index 096abfe29b..0000000000 --- a/plugins/modules/clc_server_snapshot.py +++ /dev/null @@ -1,412 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015 CenturyLink -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: clc_server_snapshot -short_description: Create, Delete and Restore server snapshots in CenturyLink Cloud -description: - - An Ansible module to Create, Delete and Restore server snapshots in CenturyLink Cloud. -options: - server_ids: - description: - - The list of CLC server Ids. - type: list - required: true - elements: str - expiration_days: - description: - - The number of days to keep the server snapshot before it expires. - type: int - default: 7 - required: false - state: - description: - - The state to insure that the provided resources are in. - type: str - default: 'present' - required: false - choices: ['present', 'absent', 'restore'] - wait: - description: - - Whether to wait for the provisioning tasks to finish before returning. 
- default: 'True' - required: false - type: str -requirements: - - python = 2.7 - - requests >= 2.5.0 - - clc-sdk -author: "CLC Runner (@clc-runner)" -notes: - - To use this module, it is required to set the below environment variables which enables access to the - Centurylink Cloud - - CLC_V2_API_USERNAME, the account login id for the centurylink cloud - - CLC_V2_API_PASSWORD, the account password for the centurylink cloud - - Alternatively, the module accepts the API token and account alias. The API token can be generated using the - CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login - - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login - - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud - - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment. -''' - -EXAMPLES = ''' -# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples - -- name: Create server snapshot - community.general.clc_server_snapshot: - server_ids: - - UC1TEST-SVR01 - - UC1TEST-SVR02 - expiration_days: 10 - wait: true - state: present - -- name: Restore server snapshot - community.general.clc_server_snapshot: - server_ids: - - UC1TEST-SVR01 - - UC1TEST-SVR02 - wait: true - state: restore - -- name: Delete server snapshot - community.general.clc_server_snapshot: - server_ids: - - UC1TEST-SVR01 - - UC1TEST-SVR02 - wait: true - state: absent -''' - -RETURN = ''' -server_ids: - description: The list of server ids that are changed - returned: success - type: list - sample: - [ - "UC1TEST-SVR01", - "UC1TEST-SVR02" - ] -''' - -__version__ = '${version}' - -import os -import traceback - -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion - -REQUESTS_IMP_ERR = None -try: - import requests -except ImportError: - REQUESTS_IMP_ERR = traceback.format_exc() - 
REQUESTS_FOUND = False -else: - REQUESTS_FOUND = True - -# -# Requires the clc-python-sdk. -# sudo pip install clc-sdk -# -CLC_IMP_ERR = None -try: - import clc as clc_sdk - from clc import CLCException -except ImportError: - CLC_IMP_ERR = traceback.format_exc() - CLC_FOUND = False - clc_sdk = None -else: - CLC_FOUND = True - -from ansible.module_utils.basic import AnsibleModule, missing_required_lib - - -class ClcSnapshot: - - clc = clc_sdk - module = None - - def __init__(self, module): - """ - Construct module - """ - self.module = module - - if not CLC_FOUND: - self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR) - if not REQUESTS_FOUND: - self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR) - if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'): - self.module.fail_json( - msg='requests library version should be >= 2.5.0') - - self._set_user_agent(self.clc) - - def process_request(self): - """ - Process the request - Main Code Path - :return: Returns with either an exit_json or fail_json - """ - p = self.module.params - server_ids = p['server_ids'] - expiration_days = p['expiration_days'] - state = p['state'] - request_list = [] - changed = False - changed_servers = [] - - self._set_clc_credentials_from_env() - if state == 'present': - changed, request_list, changed_servers = self.ensure_server_snapshot_present( - server_ids=server_ids, - expiration_days=expiration_days) - elif state == 'absent': - changed, request_list, changed_servers = self.ensure_server_snapshot_absent( - server_ids=server_ids) - elif state == 'restore': - changed, request_list, changed_servers = self.ensure_server_snapshot_restore( - server_ids=server_ids) - - self._wait_for_requests_to_complete(request_list) - return self.module.exit_json( - changed=changed, - server_ids=changed_servers) - - def ensure_server_snapshot_present(self, server_ids, expiration_days): - """ - Ensures the given set 
of server_ids have the snapshots created - :param server_ids: The list of server_ids to create the snapshot - :param expiration_days: The number of days to keep the snapshot - :return: (changed, request_list, changed_servers) - changed: A flag indicating whether any change was made - request_list: the list of clc request objects from CLC API call - changed_servers: The list of servers ids that are modified - """ - request_list = [] - changed = False - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - servers_to_change = [ - server for server in servers if len( - server.GetSnapshots()) == 0] - for server in servers_to_change: - changed = True - if not self.module.check_mode: - request = self._create_server_snapshot(server, expiration_days) - request_list.append(request) - changed_servers = [ - server.id for server in servers_to_change if server.id] - return changed, request_list, changed_servers - - def _create_server_snapshot(self, server, expiration_days): - """ - Create the snapshot for the CLC server - :param server: the CLC server object - :param expiration_days: The number of days to keep the snapshot - :return: the create request object from CLC API Call - """ - result = None - try: - result = server.CreateSnapshot( - delete_existing=True, - expiration_days=expiration_days) - except CLCException as ex: - self.module.fail_json(msg='Failed to create snapshot for server : {0}. 
{1}'.format( - server.id, ex.response_text - )) - return result - - def ensure_server_snapshot_absent(self, server_ids): - """ - Ensures the given set of server_ids have the snapshots removed - :param server_ids: The list of server_ids to delete the snapshot - :return: (changed, request_list, changed_servers) - changed: A flag indicating whether any change was made - request_list: the list of clc request objects from CLC API call - changed_servers: The list of servers ids that are modified - """ - request_list = [] - changed = False - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - servers_to_change = [ - server for server in servers if len( - server.GetSnapshots()) > 0] - for server in servers_to_change: - changed = True - if not self.module.check_mode: - request = self._delete_server_snapshot(server) - request_list.append(request) - changed_servers = [ - server.id for server in servers_to_change if server.id] - return changed, request_list, changed_servers - - def _delete_server_snapshot(self, server): - """ - Delete snapshot for the CLC server - :param server: the CLC server object - :return: the delete snapshot request object from CLC API - """ - result = None - try: - result = server.DeleteSnapshot() - except CLCException as ex: - self.module.fail_json(msg='Failed to delete snapshot for server : {0}. 
{1}'.format( - server.id, ex.response_text - )) - return result - - def ensure_server_snapshot_restore(self, server_ids): - """ - Ensures the given set of server_ids have the snapshots restored - :param server_ids: The list of server_ids to delete the snapshot - :return: (changed, request_list, changed_servers) - changed: A flag indicating whether any change was made - request_list: the list of clc request objects from CLC API call - changed_servers: The list of servers ids that are modified - """ - request_list = [] - changed = False - servers = self._get_servers_from_clc( - server_ids, - 'Failed to obtain server list from the CLC API') - servers_to_change = [ - server for server in servers if len( - server.GetSnapshots()) > 0] - for server in servers_to_change: - changed = True - if not self.module.check_mode: - request = self._restore_server_snapshot(server) - request_list.append(request) - changed_servers = [ - server.id for server in servers_to_change if server.id] - return changed, request_list, changed_servers - - def _restore_server_snapshot(self, server): - """ - Restore snapshot for the CLC server - :param server: the CLC server object - :return: the restore snapshot request object from CLC API - """ - result = None - try: - result = server.RestoreSnapshot() - except CLCException as ex: - self.module.fail_json(msg='Failed to restore snapshot for server : {0}. 
{1}'.format( - server.id, ex.response_text - )) - return result - - def _wait_for_requests_to_complete(self, requests_lst): - """ - Waits until the CLC requests are complete if the wait argument is True - :param requests_lst: The list of CLC request objects - :return: none - """ - if not self.module.params['wait']: - return - for request in requests_lst: - request.WaitUntilComplete() - for request_details in request.requests: - if request_details.Status() != 'succeeded': - self.module.fail_json( - msg='Unable to process server snapshot request') - - @staticmethod - def define_argument_spec(): - """ - This function defines the dictionary object required for - package module - :return: the package dictionary object - """ - argument_spec = dict( - server_ids=dict(type='list', required=True, elements='str'), - expiration_days=dict(default=7, type='int'), - wait=dict(default=True), - state=dict( - default='present', - choices=[ - 'present', - 'absent', - 'restore']), - ) - return argument_spec - - def _get_servers_from_clc(self, server_list, message): - """ - Internal function to fetch list of CLC server objects from a list of server ids - :param server_list: The list of server ids - :param message: The error message to throw in case of any error - :return the list of CLC server objects - """ - try: - return self.clc.v2.Servers(server_list).servers - except CLCException as ex: - return self.module.fail_json(msg=message + ': %s' % ex) - - def _set_clc_credentials_from_env(self): - """ - Set the CLC Credentials on the sdk by reading environment variables - :return: none - """ - env = os.environ - v2_api_token = env.get('CLC_V2_API_TOKEN', False) - v2_api_username = env.get('CLC_V2_API_USERNAME', False) - v2_api_passwd = env.get('CLC_V2_API_PASSWD', False) - clc_alias = env.get('CLC_ACCT_ALIAS', False) - api_url = env.get('CLC_V2_API_URL', False) - - if api_url: - self.clc.defaults.ENDPOINT_URL_V2 = api_url - - if v2_api_token and clc_alias: - self.clc._LOGIN_TOKEN_V2 = 
v2_api_token - self.clc._V2_ENABLED = True - self.clc.ALIAS = clc_alias - elif v2_api_username and v2_api_passwd: - self.clc.v2.SetCredentials( - api_username=v2_api_username, - api_passwd=v2_api_passwd) - else: - return self.module.fail_json( - msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD " - "environment variables") - - @staticmethod - def _set_user_agent(clc): - if hasattr(clc, 'SetRequestsSession'): - agent_string = "ClcAnsibleModule/" + __version__ - ses = requests.Session() - ses.headers.update({"Api-Client": agent_string}) - ses.headers['User-Agent'] += " " + agent_string - clc.SetRequestsSession(ses) - - -def main(): - """ - Main function - :return: None - """ - module = AnsibleModule( - argument_spec=ClcSnapshot.define_argument_spec(), - supports_check_mode=True - ) - clc_snapshot = ClcSnapshot(module) - clc_snapshot.process_request() - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/cloud_init_data_facts.py b/plugins/modules/cloud_init_data_facts.py index d8209cc61a..8da427fa2e 100644 --- a/plugins/modules/cloud_init_data_facts.py +++ b/plugins/modules/cloud_init_data_facts.py @@ -1,19 +1,16 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2018, René Moser # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: cloud_init_data_facts short_description: Retrieve facts of cloud-init description: - - Gathers facts by reading the status.json and result.json of cloud-init. + - Gathers facts by reading the C(status.json) and C(result.json) of cloud-init. 
author: René Moser (@resmo) extends_documentation_fragment: - community.general.attributes @@ -22,14 +19,14 @@ extends_documentation_fragment: options: filter: description: - - Filter facts + - Filter facts. type: str - choices: [ status, result ] + choices: [status, result] notes: - See http://cloudinit.readthedocs.io/ for more information about cloud-init. -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Gather all facts of cloud init community.general.cloud_init_data_facts: register: result @@ -44,47 +41,49 @@ EXAMPLES = ''' until: "res.cloud_init_data_facts.status.v1.stage is defined and not res.cloud_init_data_facts.status.v1.stage" retries: 50 delay: 5 -''' +""" -RETURN = ''' ---- +RETURN = r""" cloud_init_data_facts: description: Facts of result and status. returned: success type: dict - sample: '{ - "status": { + sample: + { + "status": { "v1": { - "datasource": "DataSourceCloudStack", - "errors": [] - }, - "result": { - "v1": { - "datasource": "DataSourceCloudStack", - "init": { - "errors": [], - "finished": 1522066377.0185432, - "start": 1522066375.2648022 - }, - "init-local": { - "errors": [], - "finished": 1522066373.70919, - "start": 1522066373.4726632 - }, - "modules-config": { - "errors": [], - "finished": 1522066380.9097016, - "start": 1522066379.0011985 - }, - "modules-final": { - "errors": [], - "finished": 1522066383.56594, - "start": 1522066382.3449218 - }, - "stage": null + "datasource": "DataSourceCloudStack", + "errors": [] } - }' -''' + }, + "result": { + "v1": { + "datasource": "DataSourceCloudStack", + "init": { + "errors": [], + "finished": 1522066377.0185432, + "start": 1522066375.2648022 + }, + "init-local": { + "errors": [], + "finished": 1522066373.70919, + "start": 1522066373.4726632 + }, + "modules-config": { + "errors": [], + "finished": 1522066380.9097016, + "start": 1522066379.0011985 + }, + "modules-final": { + "errors": [], + "finished": 1522066383.56594, + "start": 1522066382.3449218 + }, + "stage": null + } + } + } +""" 
import os @@ -107,9 +106,8 @@ def gather_cloud_init_data_facts(module): json_file = os.path.join(CLOUD_INIT_PATH, i + '.json') if os.path.exists(json_file): - f = open(json_file, 'rb') - contents = to_text(f.read(), errors='surrogate_or_strict') - f.close() + with open(json_file, 'rb') as f: + contents = to_text(f.read(), errors='surrogate_or_strict') if contents: res['cloud_init_data_facts'][i] = module.from_json(contents) diff --git a/plugins/modules/cloudflare_dns.py b/plugins/modules/cloudflare_dns.py index 92132c0f6f..df10d0a0b6 100644 --- a/plugins/modules/cloudflare_dns.py +++ b/plugins/modules/cloudflare_dns.py @@ -1,160 +1,185 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2016 Michael Gruener # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: cloudflare_dns author: -- Michael Gruener (@mgruener) -requirements: - - python >= 2.6 + - Michael Gruener (@mgruener) short_description: Manage Cloudflare DNS records description: - - "Manages dns records via the Cloudflare API, see the docs: U(https://api.cloudflare.com/)." + - 'Manages DNS records using the Cloudflare API, see the docs: U(https://api.cloudflare.com/).' +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: api_token: description: - - API token. - - Required for api token authentication. - - "You can obtain your API token from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)." - - Can be specified in C(CLOUDFLARE_TOKEN) environment variable since community.general 2.0.0. + - API token. + - Required for API token authentication. 
+ - "You can obtain your API token from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)." + - Can be specified in E(CLOUDFLARE_TOKEN) environment variable since community.general 2.0.0. type: str - required: false version_added: '0.2.0' account_api_key: description: - - Account API key. - - Required for api keys authentication. - - "You can obtain your API key from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)." + - Account API key. + - Required for API keys authentication. + - "You can obtain your API key from the bottom of the Cloudflare 'My Account' page, found here: U(https://dash.cloudflare.com/)." type: str - required: false - aliases: [ account_api_token ] + aliases: [account_api_token] account_email: description: - - Account email. Required for API keys authentication. + - Account email. Required for API keys authentication. type: str - required: false algorithm: description: - - Algorithm number. - - Required for I(type=DS) and I(type=SSHFP) when I(state=present). + - Algorithm number. + - Required for O(type=DS) and O(type=SSHFP) when O(state=present). type: int cert_usage: description: - - Certificate usage number. - - Required for I(type=TLSA) when I(state=present). + - Certificate usage number. + - Required for O(type=TLSA) when O(state=present). type: int - choices: [ 0, 1, 2, 3 ] + choices: [0, 1, 2, 3] + comment: + description: + - Comments or notes about the DNS record. + type: str + version_added: 10.1.0 + flag: + description: + - Issuer Critical Flag. + - Required for O(type=CAA) when O(state=present). + type: int + choices: [0, 1] + version_added: 8.0.0 + tag: + description: + - CAA issue restriction. + - Required for O(type=CAA) when O(state=present). + type: str + choices: [issue, issuewild, iodef] + version_added: 8.0.0 hash_type: description: - - Hash type number. - - Required for I(type=DS), I(type=SSHFP) and I(type=TLSA) when I(state=present). 
+ - Hash type number. + - Required for O(type=DS), O(type=SSHFP) and O(type=TLSA) when O(state=present). type: int - choices: [ 1, 2 ] + choices: [1, 2] key_tag: description: - - DNSSEC key tag. - - Needed for I(type=DS) when I(state=present). + - DNSSEC key tag. + - Needed for O(type=DS) when O(state=present). type: int port: description: - - Service port. - - Required for I(type=SRV) and I(type=TLSA). + - Service port. + - Required for O(type=SRV) and O(type=TLSA). type: int priority: description: - - Record priority. - - Required for I(type=MX) and I(type=SRV) + - Record priority. + - Required for O(type=MX) and O(type=SRV). default: 1 type: int proto: description: - - Service protocol. Required for I(type=SRV) and I(type=TLSA). - - Common values are TCP and UDP. - - Before Ansible 2.6 only TCP and UDP were available. + - Service protocol. Required for O(type=SRV) and O(type=TLSA). + - Common values are TCP and UDP. type: str proxied: description: - - Proxy through Cloudflare network or just use DNS. + - Proxy through Cloudflare network or just use DNS. type: bool default: false record: description: - - Record to add. - - Required if I(state=present). - - Default is C(@) (e.g. the zone name). + - Record to add. + - Required if O(state=present). + - Default is V(@) (that is, the zone name). type: str default: '@' - aliases: [ name ] + aliases: [name] selector: description: - - Selector number. - - Required for I(type=TLSA) when I(state=present). - choices: [ 0, 1 ] + - Selector number. + - Required for O(type=TLSA) when O(state=present). + choices: [0, 1] type: int service: description: - - Record service. - - Required for I(type=SRV). + - Record service. + - Required for O(type=SRV). type: str solo: description: - - Whether the record should be the only one for that record type and record name. - - Only use with I(state=present). - - This will delete all other records with the same record name and type. 
+ - Whether the record should be the only one for that record type and record name. + - Only use with O(state=present). + - This deletes all other records with the same record name and type. type: bool state: description: - - Whether the record(s) should exist or not. + - Whether the record(s) should exist or not. type: str - choices: [ absent, present ] + choices: [absent, present] default: present + tags: + description: + - Custom tags for the DNS record. + type: list + elements: str + version_added: 10.1.0 timeout: description: - - Timeout for Cloudflare API calls. + - Timeout for Cloudflare API calls. type: int default: 30 ttl: description: - - The TTL to give the new record. - - Must be between 120 and 2,147,483,647 seconds, or 1 for automatic. + - The TTL to give the new record. + - Must be between V(120) and V(2,147,483,647) seconds, or V(1) for automatic. type: int default: 1 type: description: - - The type of DNS record to create. Required if I(state=present). - - I(type=DS), I(type=SSHFP) and I(type=TLSA) added in Ansible 2.7. + - The type of DNS record to create. Required if O(state=present). + - Support for V(SPF) has been removed from community.general 9.0.0 since that record type is no longer supported by + CloudFlare. + - Support for V(PTR) has been added in community.general 11.1.0. type: str - choices: [ A, AAAA, CNAME, DS, MX, NS, SPF, SRV, SSHFP, TLSA, TXT ] + choices: [A, AAAA, CNAME, DS, MX, NS, SRV, SSHFP, TLSA, CAA, TXT, PTR] value: description: - - The record value. - - Required for I(state=present). + - The record value. + - Required for O(state=present). type: str - aliases: [ content ] + aliases: [content] weight: description: - - Service weight. - - Required for I(type=SRV). + - Service weight. + - Required for O(type=SRV). type: int default: 1 zone: description: - - The name of the Zone to work with (e.g. "example.com"). - - The Zone must already exist. + - The name of the Zone to work with (for example V(example.com)). 
+ - The Zone must already exist. type: str required: true - aliases: [ domain ] -''' + aliases: [domain] +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a test.example.net A record to point to 127.0.0.1 community.general.cloudflare_dns: zone: example.net @@ -173,6 +198,18 @@ EXAMPLES = r''' value: 127.0.0.1 api_token: dummyapitoken +- name: Create a record with comment and tags + community.general.cloudflare_dns: + zone: example.net + record: test + type: A + value: 127.0.0.1 + comment: Local test website + tags: + - test + - local + api_token: dummyapitoken + - name: Create a example.net CNAME record to example.com community.general.cloudflare_dns: zone: example.net @@ -255,6 +292,15 @@ EXAMPLES = r''' hash_type: 1 value: 6b76d034492b493e15a7376fccd08e63befdad0edab8e442562f532338364bf3 +- name: Create a CAA record subdomain.example.com + community.general.cloudflare_dns: + zone: example.com + record: subdomain + type: CAA + flag: 0 + tag: issue + value: ca.example.com + - name: Create a DS record for subdomain.example.com community.general.cloudflare_dns: zone: example.com @@ -264,111 +310,147 @@ EXAMPLES = r''' algorithm: 8 hash_type: 2 value: B4EB5AC4467D2DFB3BAF9FB9961DC1B6FED54A58CDFAA3E465081EC86F89BFAB -''' -RETURN = r''' +- name: Create PTR record "1.2.0.192.in-addr.arpa" with value "test.example.com" + community.general.cloudflare_dns: + zone: 2.0.192.in-addr.arpa + record: 1 + type: PTR + value: test.example.com + state: present +""" + +RETURN = r""" record: - description: A dictionary containing the record data. - returned: success, except on record deletion - type: complex - contains: - content: - description: The record content (details depend on record type). - returned: success - type: str - sample: 192.0.2.91 - created_on: - description: The record creation date. - returned: success - type: str - sample: "2016-03-25T19:09:42.516553Z" - data: - description: Additional record data. 
- returned: success, if type is SRV, DS, SSHFP or TLSA - type: dict - sample: { - name: "jabber", - port: 8080, - priority: 10, - proto: "_tcp", - service: "_xmpp", - target: "jabberhost.sample.com", - weight: 5, - } - id: - description: The record ID. - returned: success - type: str - sample: f9efb0549e96abcb750de63b38c9576e - locked: - description: No documentation available. - returned: success - type: bool - sample: false - meta: - description: No documentation available. - returned: success - type: dict - sample: { auto_added: false } - modified_on: - description: Record modification date. - returned: success - type: str - sample: "2016-03-25T19:09:42.516553Z" - name: - description: The record name as FQDN (including _service and _proto for SRV). - returned: success - type: str - sample: www.sample.com - priority: - description: Priority of the MX record. - returned: success, if type is MX - type: int - sample: 10 - proxiable: - description: Whether this record can be proxied through Cloudflare. - returned: success - type: bool - sample: false - proxied: - description: Whether the record is proxied through Cloudflare. - returned: success - type: bool - sample: false - ttl: - description: The time-to-live for the record. - returned: success - type: int - sample: 300 - type: - description: The record type. - returned: success - type: str - sample: A - zone_id: - description: The ID of the zone containing the record. - returned: success - type: str - sample: abcede0bf9f0066f94029d2e6b73856a - zone_name: - description: The name of the zone containing the record. - returned: success - type: str - sample: sample.com -''' + description: A dictionary containing the record data. + returned: success, except on record deletion + type: complex + contains: + comment: + description: Comments or notes about the DNS record. 
+ returned: success + type: str + sample: Domain verification record + version_added: 10.1.0 + comment_modified_on: + description: When the record comment was last modified. Omitted if there is no comment. + returned: success + type: str + sample: "2024-01-01T05:20:00.12345Z" + version_added: 10.1.0 + content: + description: The record content (details depend on record type). + returned: success + type: str + sample: 192.0.2.91 + created_on: + description: The record creation date. + returned: success + type: str + sample: "2016-03-25T19:09:42.516553Z" + data: + description: Additional record data. + returned: success, if type is SRV, DS, SSHFP TLSA or CAA + type: dict + sample: + { + "name": "jabber", + "port": 8080, + "priority": 10, + "proto": "_tcp", + "service": "_xmpp", + "target": "jabberhost.sample.com", + "weight": 5 + } + id: + description: The record ID. + returned: success + type: str + sample: f9efb0549e96abcb750de63b38c9576e + locked: + description: No documentation available. + returned: success + type: bool + sample: false + meta: + description: Extra Cloudflare-specific information about the record. + returned: success + type: dict + sample: {"auto_added": false} + modified_on: + description: Record modification date. + returned: success + type: str + sample: "2016-03-25T19:09:42.516553Z" + name: + description: The record name as FQDN (including _service and _proto for SRV). + returned: success + type: str + sample: www.sample.com + priority: + description: Priority of the MX record. + returned: success, if type is MX + type: int + sample: 10 + proxiable: + description: Whether this record can be proxied through Cloudflare. + returned: success + type: bool + sample: false + proxied: + description: Whether the record is proxied through Cloudflare. + returned: success + type: bool + sample: false + tags: + description: Custom tags for the DNS record. 
+ returned: success + type: list + elements: str + sample: ["production", "app"] + version_added: 10.1.0 + tags_modified_on: + description: When the record tags were last modified. Omitted if there are no tags. + returned: success + type: str + sample: "2025-01-01T05:20:00.12345Z" + version_added: 10.1.0 + ttl: + description: The time-to-live for the record. + returned: success + type: int + sample: 300 + type: + description: The record type. + returned: success + type: str + sample: A + zone_id: + description: The ID of the zone containing the record. + returned: success + type: str + sample: abcede0bf9f0066f94029d2e6b73856a + zone_name: + description: The name of the zone containing the record. + returned: success + type: str + sample: sample.com +""" import json +from urllib.parse import urlencode from ansible.module_utils.basic import AnsibleModule, env_fallback -from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.common.text.converters import to_native, to_text from ansible.module_utils.urls import fetch_url def lowercase_string(param): - if not isinstance(param, str): - return param - return param.lower() + return param.lower() if isinstance(param, str) else param + + +def join_str(sep, *args): + return sep.join([str(arg) for arg in args]) class CloudflareAPI(object): @@ -383,7 +465,11 @@ class CloudflareAPI(object): self.account_email = module.params['account_email'] self.algorithm = module.params['algorithm'] self.cert_usage = module.params['cert_usage'] + self.comment = module.params['comment'] self.hash_type = module.params['hash_type'] + self.flag = module.params['flag'] + self.tag = module.params['tag'] + self.tags = module.params['tags'] self.key_tag = module.params['key_tag'] self.port = module.params['port'] self.priority = module.params['priority'] @@ -410,29 +496,29 @@ class CloudflareAPI(object): if (self.type == 'AAAA') and (self.value is not None): self.value = self.value.lower() - if (self.type == 'SRV'): 
+ if self.type == 'SRV': if (self.proto is not None) and (not self.proto.startswith('_')): - self.proto = '_' + self.proto + self.proto = '_{0}'.format(self.proto) if (self.service is not None) and (not self.service.startswith('_')): - self.service = '_' + self.service + self.service = '_{0}'.format(self.service) - if (self.type == 'TLSA'): + if self.type == 'TLSA': if (self.proto is not None) and (not self.proto.startswith('_')): - self.proto = '_' + self.proto + self.proto = '_{0}'.format(self.proto) if (self.port is not None): - self.port = '_' + str(self.port) + self.port = '_{0}'.format(self.port) if not self.record.endswith(self.zone): - self.record = self.record + '.' + self.zone + self.record = join_str('.', self.record, self.zone) - if (self.type == 'DS'): + if self.type == 'DS': if self.record == self.zone: self.module.fail_json(msg="DS records only apply to subdomains.") def _cf_simple_api_call(self, api_call, method='GET', payload=None): if self.api_token: headers = { - 'Authorization': 'Bearer ' + self.api_token, + 'Authorization': 'Bearer {0}'.format(self.api_token), 'Content-Type': 'application/json', } else: @@ -482,6 +568,9 @@ class CloudflareAPI(object): try: content = resp.read() except AttributeError: + content = None + + if not content: if info['body']: content = info['body'] else: @@ -529,7 +618,7 @@ class CloudflareAPI(object): else: raw_api_call = api_call while next_page <= pagination['total_pages']: - raw_api_call += '?' + '&'.join(parameters) + raw_api_call += '?{0}'.format('&'.join(parameters)) result, status = self._cf_simple_api_call(raw_api_call, method, payload) data += result['result'] next_page += 1 @@ -554,8 +643,8 @@ class CloudflareAPI(object): name = self.zone param = '' if name: - param = '?' 
+ urlencode({'name': name}) - zones, status = self._cf_api_call('/zones' + param) + param = '?{0}'.format(urlencode({'name': name})) + zones, status = self._cf_api_call('/zones{0}'.format(param)) return zones def get_dns_records(self, zone_name=None, type=None, record=None, value=''): @@ -580,195 +669,212 @@ class CloudflareAPI(object): if value: query['content'] = value if query: - api_call += '?' + urlencode(query) + api_call += '?{0}'.format(urlencode(query)) records, status = self._cf_api_call(api_call) return records - def delete_dns_records(self, **kwargs): - params = {} - for param in ['port', 'proto', 'service', 'solo', 'type', 'record', 'value', 'weight', 'zone', - 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag']: - if param in kwargs: - params[param] = kwargs[param] - else: - params[param] = getattr(self, param) - + def delete_dns_records(self, solo): records = [] - content = params['value'] - search_record = params['record'] - if params['type'] == 'SRV': - if not (params['value'] is None or params['value'] == ''): - content = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value'] - search_record = params['service'] + '.' + params['proto'] + '.' + params['record'] - elif params['type'] == 'DS': - if not (params['value'] is None or params['value'] == ''): - content = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value'] - elif params['type'] == 'SSHFP': - if not (params['value'] is None or params['value'] == ''): - content = str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value'] - elif params['type'] == 'TLSA': - if not (params['value'] is None or params['value'] == ''): - content = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value'] - search_record = params['port'] + '.' + params['proto'] + '.' 
+ params['record'] - if params['solo']: + content = self.value + search_record = self.record + if self.type == 'SRV': + if not (self.value is None or self.value == ''): + content = join_str('\t', self.weight, self.port, self.value) + search_record = join_str('.', self.service, self.proto, self.record) + elif self.type == 'DS': + if not (self.value is None or self.value == ''): + content = join_str('\t', self.key_tag, self.algorithm, self.hash_type, self.value) + elif self.type == 'SSHFP': + if not (self.value is None or self.value == ''): + content = join_str(' ', self.algorithm, self.hash_type, self.value.upper()) + elif self.type == 'TLSA': + if not (self.value is None or self.value == ''): + content = join_str('\t', self.cert_usage, self.selector, self.hash_type, self.value) + search_record = join_str('.', self.port, self.proto, self.record) + if solo: search_value = None else: search_value = content - records = self.get_dns_records(params['zone'], params['type'], search_record, search_value) + zone_id = self._get_zone_id(self.zone) + records = self.get_dns_records(self.zone, self.type, search_record, search_value) for rr in records: - if params['solo']: - if not ((rr['type'] == params['type']) and (rr['name'] == search_record) and (rr['content'] == content)): + if solo: + if not ((rr['type'] == self.type) and (rr['name'] == search_record) and (rr['content'] == content)): self.changed = True if not self.module.check_mode: - result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE') + result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id, rr['id']), 'DELETE') else: self.changed = True if not self.module.check_mode: - result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(rr['zone_id'], rr['id']), 'DELETE') + result, info = self._cf_api_call('/zones/{0}/dns_records/{1}'.format(zone_id, rr['id']), 'DELETE') return self.changed - def ensure_dns_record(self, **kwargs): - params = {} - 
for param in ['port', 'priority', 'proto', 'proxied', 'service', 'ttl', 'type', 'record', 'value', 'weight', 'zone', - 'algorithm', 'cert_usage', 'hash_type', 'selector', 'key_tag']: - if param in kwargs: - params[param] = kwargs[param] - else: - params[param] = getattr(self, param) - - search_value = params['value'] - search_record = params['record'] + def ensure_dns_record(self): + search_value = self.value + search_record = self.record new_record = None - if (params['type'] is None) or (params['record'] is None): - self.module.fail_json(msg="You must provide a type and a record to create a new record") - if (params['type'] in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS', 'SPF']): - if not params['value']: + if self.type in ['A', 'AAAA', 'CNAME', 'TXT', 'MX', 'NS', 'PTR']: + if not self.value: self.module.fail_json(msg="You must provide a non-empty value to create this record type") # there can only be one CNAME per record # ignoring the value when searching for existing # CNAME records allows us to update the value if it # changes - if params['type'] == 'CNAME': + if self.type == 'CNAME': search_value = None new_record = { - "type": params['type'], - "name": params['record'], - "content": params['value'], - "ttl": params['ttl'] + "type": self.type, + "name": self.record, + "content": self.value, + "ttl": self.ttl } - if (params['type'] in ['A', 'AAAA', 'CNAME']): - new_record["proxied"] = params["proxied"] + if self.type in ['A', 'AAAA', 'CNAME']: + new_record["proxied"] = self.proxied - if params['type'] == 'MX': - for attr in [params['priority'], params['value']]: + if self.type == 'MX': + for attr in [self.priority, self.value]: if (attr is None) or (attr == ''): self.module.fail_json(msg="You must provide priority and a value to create this record type") new_record = { - "type": params['type'], - "name": params['record'], - "content": params['value'], - "priority": params['priority'], - "ttl": params['ttl'] + "type": self.type, + "name": self.record, + 
"content": self.value, + "priority": self.priority, + "ttl": self.ttl } - if params['type'] == 'SRV': - for attr in [params['port'], params['priority'], params['proto'], params['service'], params['weight'], params['value']]: + if self.type == 'SRV': + for attr in [self.port, self.priority, self.proto, self.service, self.weight, self.value]: if (attr is None) or (attr == ''): self.module.fail_json(msg="You must provide port, priority, proto, service, weight and a value to create this record type") srv_data = { - "target": params['value'], - "port": params['port'], - "weight": params['weight'], - "priority": params['priority'], - "name": params['record'][:-len('.' + params['zone'])], - "proto": params['proto'], - "service": params['service'] + "target": self.value, + "port": self.port, + "weight": self.weight, + "priority": self.priority, } - new_record = {"type": params['type'], "ttl": params['ttl'], 'data': srv_data} - search_value = str(params['weight']) + '\t' + str(params['port']) + '\t' + params['value'] - search_record = params['service'] + '.' + params['proto'] + '.' 
+ params['record'] - if params['type'] == 'DS': - for attr in [params['key_tag'], params['algorithm'], params['hash_type'], params['value']]: + new_record = { + "type": self.type, + "name": join_str('.', self.service, self.proto, self.record), + "ttl": self.ttl, + 'data': srv_data, + } + search_value = join_str('\t', self.weight, self.port, self.value) + search_record = join_str('.', self.service, self.proto, self.record) + + if self.type == 'DS': + for attr in [self.key_tag, self.algorithm, self.hash_type, self.value]: if (attr is None) or (attr == ''): self.module.fail_json(msg="You must provide key_tag, algorithm, hash_type and a value to create this record type") ds_data = { - "key_tag": params['key_tag'], - "algorithm": params['algorithm'], - "digest_type": params['hash_type'], - "digest": params['value'], + "key_tag": self.key_tag, + "algorithm": self.algorithm, + "digest_type": self.hash_type, + "digest": self.value, } new_record = { - "type": params['type'], - "name": params['record'], + "type": self.type, + "name": self.record, 'data': ds_data, - "ttl": params['ttl'], + "ttl": self.ttl, } - search_value = str(params['key_tag']) + '\t' + str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value'] + search_value = join_str('\t', self.key_tag, self.algorithm, self.hash_type, self.value) - if params['type'] == 'SSHFP': - for attr in [params['algorithm'], params['hash_type'], params['value']]: + if self.type == 'SSHFP': + for attr in [self.algorithm, self.hash_type, self.value]: if (attr is None) or (attr == ''): self.module.fail_json(msg="You must provide algorithm, hash_type and a value to create this record type") sshfp_data = { - "fingerprint": params['value'], - "type": params['hash_type'], - "algorithm": params['algorithm'], + "fingerprint": self.value.upper(), + "type": self.hash_type, + "algorithm": self.algorithm, } new_record = { - "type": params['type'], - "name": params['record'], + "type": self.type, + "name": self.record, 
'data': sshfp_data, - "ttl": params['ttl'], + "ttl": self.ttl, } - search_value = str(params['algorithm']) + '\t' + str(params['hash_type']) + '\t' + params['value'] + search_value = join_str(' ', self.algorithm, self.hash_type, self.value) - if params['type'] == 'TLSA': - for attr in [params['port'], params['proto'], params['cert_usage'], params['selector'], params['hash_type'], params['value']]: + if self.type == 'TLSA': + for attr in [self.port, self.proto, self.cert_usage, self.selector, self.hash_type, self.value]: if (attr is None) or (attr == ''): self.module.fail_json(msg="You must provide port, proto, cert_usage, selector, hash_type and a value to create this record type") - search_record = params['port'] + '.' + params['proto'] + '.' + params['record'] + search_record = join_str('.', self.port, self.proto, self.record) tlsa_data = { - "usage": params['cert_usage'], - "selector": params['selector'], - "matching_type": params['hash_type'], - "certificate": params['value'], + "usage": self.cert_usage, + "selector": self.selector, + "matching_type": self.hash_type, + "certificate": self.value, } new_record = { - "type": params['type'], + "type": self.type, "name": search_record, 'data': tlsa_data, - "ttl": params['ttl'], + "ttl": self.ttl, } - search_value = str(params['cert_usage']) + '\t' + str(params['selector']) + '\t' + str(params['hash_type']) + '\t' + params['value'] + search_value = join_str('\t', self.cert_usage, self.selector, self.hash_type, self.value) - zone_id = self._get_zone_id(params['zone']) - records = self.get_dns_records(params['zone'], params['type'], search_record, search_value) + if self.type == 'CAA': + for attr in [self.flag, self.tag, self.value]: + if attr == '': + self.module.fail_json(msg="You must provide flag, tag and a value to create this record type") + caa_data = { + "flags": self.flag, + "tag": self.tag, + "value": self.value, + } + new_record = { + "type": self.type, + "name": self.record, + 'data': caa_data, + "ttl": 
self.ttl, + } + search_value = None + + new_record['comment'] = self.comment or None + new_record['tags'] = self.tags or [] + + zone_id = self._get_zone_id(self.zone) + records = self.get_dns_records(self.zone, self.type, search_record, search_value) # in theory this should be impossible as cloudflare does not allow # the creation of duplicate records but lets cover it anyways if len(records) > 1: - self.module.fail_json(msg="More than one record already exists for the given attributes. That should be impossible, please open an issue!") + # As Cloudflare API cannot filter record containing quotes + # CAA records must be compared locally + if self.type == 'CAA': + for rr in records: + if rr['data']['flags'] == caa_data['flags'] and rr['data']['tag'] == caa_data['tag'] and rr['data']['value'] == caa_data['value']: + return rr, self.changed + else: + self.module.fail_json(msg="More than one record already exists for the given attributes. That should be impossible, please open an issue!") # record already exists, check if it must be updated if len(records) == 1: cur_record = records[0] do_update = False - if (params['ttl'] is not None) and (cur_record['ttl'] != params['ttl']): + if (self.ttl is not None) and (cur_record['ttl'] != self.ttl): do_update = True - if (params['priority'] is not None) and ('priority' in cur_record) and (cur_record['priority'] != params['priority']): + if (self.priority is not None) and ('priority' in cur_record) and (cur_record['priority'] != self.priority): do_update = True - if ('proxied' in new_record) and ('proxied' in cur_record) and (cur_record['proxied'] != params['proxied']): + if ('proxied' in new_record) and ('proxied' in cur_record) and (cur_record['proxied'] != self.proxied): do_update = True if ('data' in new_record) and ('data' in cur_record): - if (cur_record['data'] != new_record['data']): + if cur_record['data'] != new_record['data']: do_update = True - if (params['type'] == 'CNAME') and (cur_record['content'] != 
new_record['content']): + if (self.type == 'CNAME') and (cur_record['content'] != new_record['content']): + do_update = True + if cur_record['comment'] != new_record['comment']: + do_update = True + if sorted(cur_record['tags']) != sorted(new_record['tags']): do_update = True if do_update: if self.module.check_mode: @@ -790,19 +896,18 @@ class CloudflareAPI(object): def main(): module = AnsibleModule( argument_spec=dict( - api_token=dict( - type="str", - required=False, - no_log=True, - fallback=(env_fallback, ["CLOUDFLARE_TOKEN"]), - ), - account_api_key=dict(type='str', required=False, no_log=True, aliases=['account_api_token']), - account_email=dict(type='str', required=False), + api_token=dict(type="str", no_log=True, fallback=(env_fallback, ["CLOUDFLARE_TOKEN"])), + account_api_key=dict(type='str', no_log=True, aliases=['account_api_token']), + account_email=dict(type='str'), algorithm=dict(type='int'), cert_usage=dict(type='int', choices=[0, 1, 2, 3]), + comment=dict(type='str'), hash_type=dict(type='int', choices=[1, 2]), key_tag=dict(type='int', no_log=False), port=dict(type='int'), + flag=dict(type='int', choices=[0, 1]), + tag=dict(type='str', choices=['issue', 'issuewild', 'iodef']), + tags=dict(type='list', elements='str'), priority=dict(type='int', default=1), proto=dict(type='str'), proxied=dict(type='bool', default=False), @@ -813,7 +918,7 @@ def main(): state=dict(type='str', default='present', choices=['absent', 'present']), timeout=dict(type='int', default=30), ttl=dict(type='int', default=1), - type=dict(type='str', choices=['A', 'AAAA', 'CNAME', 'DS', 'MX', 'NS', 'SPF', 'SRV', 'SSHFP', 'TLSA', 'TXT']), + type=dict(type='str', choices=['A', 'AAAA', 'CNAME', 'DS', 'MX', 'NS', 'SRV', 'SSHFP', 'TLSA', 'CAA', 'TXT', 'PTR']), value=dict(type='str', aliases=['content']), weight=dict(type='int', default=1), zone=dict(type='str', required=True, aliases=['domain']), @@ -824,11 +929,16 @@ def main(): ('state', 'absent', ['record']), ('type', 'SRV', 
['proto', 'service']), ('type', 'TLSA', ['proto', 'port']), + ('type', 'CAA', ['flag', 'tag', 'value']), + ], + required_together=[ + ('account_api_key', 'account_email'), + ], + required_one_of=[ + ['api_token', 'account_api_key'], ], ) - if not module.params['api_token'] and not (module.params['account_api_key'] and module.params['account_email']): - module.fail_json(msg="Either api_token or account_api_key and account_email params are required.") if module.params['type'] == 'SRV': if not ((module.params['weight'] is not None and module.params['port'] is not None and not (module.params['value'] is None or module.params['value'] == '')) @@ -850,6 +960,10 @@ def main(): and (module.params['value'] is None or module.params['value'] == ''))): module.fail_json(msg="For TLSA records the params cert_usage, selector, hash_type and value all need to be defined, or not at all.") + if module.params['type'] == 'CAA': + if not module.params['value'] == '': + module.fail_json(msg="For CAA records the params flag, tag and value all need to be defined.") + if module.params['type'] == 'DS': if not ((module.params['key_tag'] is not None and module.params['algorithm'] is not None and module.params['hash_type'] is not None and not (module.params['value'] is None or module.params['value'] == '')) diff --git a/plugins/modules/cobbler_sync.py b/plugins/modules/cobbler_sync.py index 5e7082ddf5..158f6ee3d6 100644 --- a/plugins/modules/cobbler_sync.py +++ b/plugins/modules/cobbler_sync.py @@ -1,60 +1,62 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2018, Dag Wieers (dagwieers) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: cobbler_sync short_description: Sync Cobbler description: -- 
Sync Cobbler to commit changes. + - Sync Cobbler to commit changes. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: host: description: - - The name or IP address of the Cobbler system. + - The name or IP address of the Cobbler system. default: 127.0.0.1 type: str port: description: - - Port number to be used for REST connection. - - The default value depends on parameter C(use_ssl). + - Port number to be used for REST connection. + - The default value depends on parameter O(use_ssl). type: int username: description: - - The username to log in to Cobbler. + - The username to log in to Cobbler. default: cobbler type: str password: description: - - The password to log in to Cobbler. + - The password to log in to Cobbler. type: str use_ssl: description: - - If C(false), an HTTP connection will be used instead of the default HTTPS connection. + - If V(false), an HTTP connection is used instead of the default HTTPS connection. type: bool default: true validate_certs: description: - - If C(false), SSL certificates will not be validated. - - This should only set to C(false) when used on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates are not validated. + - This should only set to V(false) when used on personally controlled sites using self-signed certificates. type: bool default: true author: -- Dag Wieers (@dagwieers) + - Dag Wieers (@dagwieers) todo: notes: -- Concurrently syncing Cobbler is bound to fail with weird errors. -- On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation. - More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753). -''' + - Concurrently syncing Cobbler is bound to fail with weird errors. 
+""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Commit Cobbler changes community.general.cobbler_sync: host: cobbler01 @@ -62,19 +64,22 @@ EXAMPLES = r''' password: MySuperSecureP4sswOrd run_once: true delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" # Default return values -''' +""" -import datetime import ssl +import xmlrpc.client as xmlrpc_client from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import xmlrpc_client from ansible.module_utils.common.text.converters import to_text +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + def main(): module = AnsibleModule( @@ -103,7 +108,7 @@ def main(): changed=True, ) - start = datetime.datetime.utcnow() + start = now() ssl_context = None if not validate_certs: @@ -135,7 +140,7 @@ def main(): except Exception as e: module.fail_json(msg="Failed to sync Cobbler. {error}".format(error=to_text(e))) - elapsed = datetime.datetime.utcnow() - start + elapsed = now() - start module.exit_json(elapsed=elapsed.seconds, **result) diff --git a/plugins/modules/cobbler_system.py b/plugins/modules/cobbler_system.py index 973478b627..80a45854c9 100644 --- a/plugins/modules/cobbler_system.py +++ b/plugins/modules/cobbler_system.py @@ -1,83 +1,85 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2018, Dag Wieers (dagwieers) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: cobbler_system short_description: Manage system objects in Cobbler description: -- Add, modify or remove systems in Cobbler + - Add, modify or remove systems in Cobbler. 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full options: host: description: - - The name or IP address of the Cobbler system. + - The name or IP address of the Cobbler system. default: 127.0.0.1 type: str port: description: - - Port number to be used for REST connection. - - The default value depends on parameter C(use_ssl). + - Port number to be used for REST connection. + - The default value depends on parameter O(use_ssl). type: int username: description: - - The username to log in to Cobbler. + - The username to log in to Cobbler. default: cobbler type: str password: description: - - The password to log in to Cobbler. + - The password to log in to Cobbler. type: str use_ssl: description: - - If C(false), an HTTP connection will be used instead of the default HTTPS connection. + - If V(false), an HTTP connection is used instead of the default HTTPS connection. type: bool default: true validate_certs: description: - - If C(false), SSL certificates will not be validated. - - This should only set to C(false) when used on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates are not validated. + - This should only set to V(false) when used on personally controlled sites using self-signed certificates. type: bool default: true name: description: - - The system name to manage. + - The system name to manage. type: str properties: description: - - A dictionary with system properties. + - A dictionary with system properties. type: dict interfaces: description: - - A list of dictionaries containing interface options. + - A list of dictionaries containing interface options. type: dict sync: description: - - Sync on changes. - - Concurrently syncing Cobbler is bound to fail. + - Sync on changes. + - Concurrently syncing Cobbler is bound to fail. 
type: bool default: false state: description: - - Whether the system should be present, absent or a query is made. - choices: [ absent, present, query ] + - Whether the system should be present, absent or a query is made. + choices: [absent, present, query] default: present type: str author: -- Dag Wieers (@dagwieers) + - Dag Wieers (@dagwieers) notes: -- Concurrently syncing Cobbler is bound to fail with weird errors. -- On python 2.7.8 and older (i.e. on RHEL7) you may need to tweak the python behaviour to disable certificate validation. - More information at L(Certificate verification in Python standard library HTTP clients,https://access.redhat.com/articles/2039753). -''' + - Concurrently syncing Cobbler is bound to fail with weird errors. +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure the system exists in Cobbler community.general.cobbler_system: host: cobbler01 @@ -86,7 +88,7 @@ EXAMPLES = r''' name: myhost properties: profile: CentOS6-x86_64 - name_servers: [ 2.3.4.5, 3.4.5.6 ] + name_servers: [2.3.4.5, 3.4.5.6] name_servers_search: foo.com, bar.com interfaces: eth0: @@ -132,27 +134,30 @@ EXAMPLES = r''' name: myhost state: absent delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" systems: - description: List of systems - returned: I(state=query) and I(name) is not provided + description: List of systems. + returned: O(state=query) and O(name) is not provided type: list system: - description: (Resulting) information about the system we are working with - returned: when I(name) is provided + description: (Resulting) information about the system we are working with. 
+ returned: when O(name) is provided type: dict -''' +""" -import datetime import ssl +import xmlrpc.client as xmlrpc_client from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import iteritems -from ansible.module_utils.six.moves import xmlrpc_client from ansible.module_utils.common.text.converters import to_text +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + IFPROPS_MAPPING = dict( bondingopts='bonding_opts', bridgeopts='bridge_opts', @@ -225,7 +230,7 @@ def main(): changed=False, ) - start = datetime.datetime.utcnow() + start = now() ssl_context = None if not validate_certs: @@ -269,9 +274,13 @@ def main(): if system: # Update existing entry - system_id = conn.get_system_handle(name, token) + system_id = '' + if LooseVersion(str(conn.version())) >= LooseVersion('3.4'): + system_id = conn.get_system_handle(name) + else: + system_id = conn.get_system_handle(name, token) - for key, value in iteritems(module.params['properties']): + for key, value in module.params['properties'].items(): if key not in system: module.warn("Property '{0}' is not a valid system property.".format(key)) if system[key] != value: @@ -288,7 +297,7 @@ def main(): result['changed'] = True if module.params['properties']: - for key, value in iteritems(module.params['properties']): + for key, value in module.params['properties'].items(): try: conn.modify_system(system_id, key, value, token) except Exception as e: @@ -297,8 +306,8 @@ def main(): # Add interface properties interface_properties = dict() if module.params['interfaces']: - for device, values in iteritems(module.params['interfaces']): - for key, value in iteritems(values): + for device, values in module.params['interfaces'].items(): + for key, value in values.items(): if key == 'name': continue if key not in IFPROPS_MAPPING: @@ -333,7 +342,7 @@ def main(): if 
module._diff: result['diff'] = dict(before=system, after=result['system']) - elapsed = datetime.datetime.utcnow() - start + elapsed = now() - start module.exit_json(elapsed=elapsed.seconds, **result) diff --git a/plugins/modules/composer.py b/plugins/modules/composer.py index 34a15edda5..8301e3174f 100644 --- a/plugins/modules/composer.py +++ b/plugins/modules/composer.py @@ -1,116 +1,120 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2014, Dimitrios Tydeas Mengidis # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: composer author: - - "Dimitrios Tydeas Mengidis (@dmtrs)" - - "René Moser (@resmo)" + - "Dimitrios Tydeas Mengidis (@dmtrs)" + - "René Moser (@resmo)" short_description: Dependency Manager for PHP description: - - > - Composer is a tool for dependency management in PHP. It allows you to - declare the dependent libraries your project needs and it will install - them in your project for you. + - Composer is a tool for dependency management in PHP. It allows you to declare the dependent libraries your project needs + and it installs them in your project for you. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - command: - type: str - description: - - Composer command like "install", "update" and so on. - default: install - arguments: - type: str - description: - - Composer arguments like required package, version and so on. - default: '' - executable: - type: path - description: - - Path to PHP Executable on the remote host, if PHP is not in PATH. 
- aliases: [ php_path ] - working_dir: - type: path - description: - - Directory of your project (see --working-dir). This is required when - the command is not run globally. - - Will be ignored if I(global_command=true). - global_command: - description: - - Runs the specified command globally. - type: bool - default: false - prefer_source: - description: - - Forces installation from package sources when possible (see --prefer-source). - default: false - type: bool - prefer_dist: - description: - - Forces installation from package dist even for dev versions (see --prefer-dist). - default: false - type: bool - no_dev: - description: - - Disables installation of require-dev packages (see --no-dev). - default: true - type: bool - no_scripts: - description: - - Skips the execution of all scripts defined in composer.json (see --no-scripts). - default: false - type: bool - no_plugins: - description: - - Disables all plugins (see --no-plugins). - default: false - type: bool - optimize_autoloader: - description: - - Optimize autoloader during autoloader dump (see --optimize-autoloader). - - Convert PSR-0/4 autoloading to classmap to get a faster autoloader. - - Recommended especially for production, but can take a bit of time to run. - default: true - type: bool - classmap_authoritative: - description: - - Autoload classes from classmap only. - - Implicitly enable optimize_autoloader. - - Recommended especially for production, but can take a bit of time to run. - default: false - type: bool - apcu_autoloader: - description: - - Uses APCu to cache found/not-found classes - default: false - type: bool - ignore_platform_reqs: - description: - - Ignore php, hhvm, lib-* and ext-* requirements and force the installation even if the local machine does not fulfill these. - default: false - type: bool - composer_executable: - type: path - description: - - Path to composer executable on the remote host, if composer is not in C(PATH) or a custom composer is needed. 
- version_added: 3.2.0 + command: + type: str + description: + - Composer command like V(install), V(update) and so on. + default: install + arguments: + type: str + description: + - Composer arguments like required package, version and so on. + default: '' + executable: + type: path + description: + - Path to PHP executable on the remote host, if PHP is not in E(PATH). + aliases: [php_path] + working_dir: + type: path + description: + - Directory of your project (see C(--working-dir)). This is required when the command is not run globally. + - This is ignored if O(global_command=true). + global_command: + description: + - Runs the specified command globally. + type: bool + default: false + prefer_source: + description: + - Forces installation from package sources when possible (see C(--prefer-source)). + default: false + type: bool + prefer_dist: + description: + - Forces installation from package dist even for dev versions (see C(--prefer-dist)). + default: false + type: bool + no_dev: + description: + - Disables installation of require-dev packages (see C(--no-dev)). + default: true + type: bool + no_scripts: + description: + - Skips the execution of all scripts defined in composer.json (see C(--no-scripts)). + default: false + type: bool + no_plugins: + description: + - Disables all plugins (see C(--no-plugins)). + default: false + type: bool + optimize_autoloader: + description: + - Optimize autoloader during autoloader dump (see C(--optimize-autoloader)). + - Convert PSR-0/4 autoloading to classmap to get a faster autoloader. + - Recommended especially for production, but can take a bit of time to run. + default: true + type: bool + classmap_authoritative: + description: + - Autoload classes from classmap only. + - Implicitly enable optimize_autoloader. + - Recommended especially for production, but can take a bit of time to run. + default: false + type: bool + apcu_autoloader: + description: + - Uses APCu to cache found/not-found classes. 
+ default: false + type: bool + ignore_platform_reqs: + description: + - Ignore C(php), C(hhvm), C(lib-*) and C(ext-*) requirements and force the installation even if the local machine does not fulfill + these. + default: false + type: bool + composer_executable: + type: path + description: + - Path to composer executable on the remote host, if composer is not in E(PATH) or a custom composer is needed. + version_added: 3.2.0 requirements: - - php - - composer installed in bin path (recommended /usr/local/bin) or specified in I(composer_executable) + - php + - composer installed in bin path (recommended C(/usr/local/bin)) or specified in O(composer_executable) notes: - - Default options that are always appended in each execution are --no-ansi, --no-interaction and --no-progress if available. - - We received reports about issues on macOS if composer was installed by Homebrew. Please use the official install method to avoid issues. -''' + - Default options that are always appended in each execution are C(--no-ansi), C(--no-interaction) and C(--no-progress) + if available. + - We received reports about issues on macOS if composer was installed by Homebrew. Please use the official install method + to avoid issues. 
+""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Download and installs all libs and dependencies outlined in the /path/to/project/composer.lock community.general.composer: command: install @@ -134,9 +138,10 @@ EXAMPLES = ''' command: require global_command: true arguments: my/package -''' +""" import re +import shlex from ansible.module_utils.basic import AnsibleModule @@ -154,7 +159,7 @@ def has_changed(string): def get_available_options(module, command='install'): # get all available options from a composer command using composer help to json - rc, out, err = composer_command(module, "help %s" % command, arguments="--no-interaction --format=json") + rc, out, err = composer_command(module, ["help", command], arguments=["--no-interaction", "--format=json"]) if rc != 0: output = parse_out(err) module.fail_json(msg=output) @@ -163,9 +168,19 @@ def get_available_options(module, command='install'): return command_help_json['definition']['options'] -def composer_command(module, command, arguments="", options=None, global_command=False): +def composer_command(module, command, arguments=None, options=None): if options is None: options = [] + if arguments is None: + arguments = [] + + global_command = module.params['global_command'] + + if global_command: + global_arg = ["global"] + else: + global_arg = [] + options.extend(['--working-dir', module.params['working_dir']]) if module.params['executable'] is None: php_path = module.get_bin_path("php", True, ["/usr/local/bin"]) @@ -177,7 +192,7 @@ def composer_command(module, command, arguments="", options=None, global_command else: composer_path = module.params['composer_executable'] - cmd = "%s %s %s %s %s %s" % (php_path, composer_path, "global" if global_command else "", command, " ".join(options), arguments) + cmd = [php_path, composer_path] + global_arg + command + options + arguments return module.run_command(cmd) @@ -209,8 +224,7 @@ def main(): if re.search(r"\s", command): module.fail_json(msg="Use the 'arguments' param 
for passing arguments with the 'command'") - arguments = module.params['arguments'] - global_command = module.params['global_command'] + arguments = shlex.split(module.params['arguments']) available_options = get_available_options(module=module, command=command) options = [] @@ -227,9 +241,6 @@ def main(): option = "--%s" % option options.append(option) - if not global_command: - options.extend(['--working-dir', "'%s'" % module.params['working_dir']]) - option_params = { 'prefer_source': 'prefer-source', 'prefer_dist': 'prefer-dist', @@ -253,7 +264,7 @@ def main(): else: module.exit_json(skipped=True, msg="command '%s' does not support check mode, skipping" % command) - rc, out, err = composer_command(module, command, arguments, options, global_command) + rc, out, err = composer_command(module, [command], arguments, options) if rc != 0: output = parse_out(err) diff --git a/plugins/modules/consul.py b/plugins/modules/consul.py index dea853b98e..456335babf 100644 --- a/plugins/modules/consul.py +++ b/plugins/modules/consul.py @@ -1,162 +1,160 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (c) 2015, Steve Gargan # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: consul -short_description: Add, modify & delete services within a consul cluster +short_description: Add, modify & delete services within a Consul cluster description: - - Registers services and checks for an agent with a consul cluster. - A service is some process running on the agent node that should be advertised by - consul's discovery mechanism. It may optionally supply a check definition, - a periodic service test to notify the consul cluster of service's health. - - "Checks may also be registered per node e.g. 
disk usage, or cpu usage and - notify the health of the entire node to the cluster. - Service level checks do not require a check name or id as these are derived - by Consul from the Service name and id respectively by appending 'service:' - Node level checks require a I(check_name) and optionally a I(check_id)." - - Currently, there is no complete way to retrieve the script, interval or ttl - metadata for a registered check. Without this metadata it is not possible to - tell if the data supplied with ansible represents a change to a check. As a - result this does not attempt to determine changes and will always report a - changed occurred. An API method is planned to supply this metadata so at that - stage change management will be added. - - "See U(http://consul.io) for more details." + - Registers services and checks for an agent with a Consul cluster. A service is some process running on the agent node + that should be advertised by Consul's discovery mechanism. It may optionally supply a check definition, a periodic service + test to notify the Consul cluster of service's health. + - Checks may also be registered per node, for example disk usage, or cpu usage and notify the health of the entire node + to the cluster. Service level checks do not require a check name or ID as these are derived by Consul from the Service + name and ID respectively by appending V(service:) Node level checks require a O(check_name) and optionally a O(check_id). + - Currently, there is no complete way to retrieve the script, interval or TTL metadata for a registered check. Without this + metadata it is not possible to tell if the data supplied with ansible represents a change to a check. As a result this + does not attempt to determine changes and it always reports a changed occurred. An API method is planned to supply this + metadata so at that stage change management is to be added. + - See U(http://consul.io) for more details. 
requirements: - - python-consul + - py-consul - requests author: "Steve Gargan (@sgargan)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - state: - type: str - description: - - Register or deregister the consul service, defaults to present. - default: present - choices: ['present', 'absent'] - service_name: - type: str - description: - - Unique name for the service on a node, must be unique per node, - required if registering a service. May be omitted if registering - a node level check. - service_id: - type: str - description: - - The ID for the service, must be unique per node. If I(state=absent), - defaults to the service name if supplied. - host: - type: str - description: - - Host of the consul agent defaults to localhost. - default: localhost - port: - type: int - description: - - The port on which the consul agent is running. - default: 8500 - scheme: - type: str - description: - - The protocol scheme on which the consul agent is running. - default: http - validate_certs: - description: - - Whether to verify the TLS certificate of the consul agent. - type: bool - default: true - notes: - type: str - description: - - Notes to attach to check when registering it. - service_port: - type: int - description: - - The port on which the service is listening. Can optionally be supplied for - registration of a service, i.e. if I(service_name) or I(service_id) is set. - service_address: - type: str - description: - - The address to advertise that the service will be listening on. - This value will be passed as the I(address) parameter to Consul's - C(/v1/agent/service/register) API method, so refer to the Consul API - documentation for further details. - tags: - type: list - elements: str - description: - - Tags that will be attached to the service registration. 
- script: - type: str - description: - - The script/command that will be run periodically to check the health of the service. - - Requires I(interval) to be provided. - interval: - type: str - description: - - The interval at which the service check will be run. - This is a number with a C(s) or C(m) suffix to signify the units of seconds or minutes e.g C(15s) or C(1m). - If no suffix is supplied C(s) will be used by default, e.g. C(10) will be C(10s). - - Required if one of the parameters I(script), I(http), or I(tcp) is specified. - check_id: - type: str - description: - - An ID for the service check. If I(state=absent), defaults to - I(check_name). Ignored if part of a service definition. - check_name: - type: str - description: - - Name for the service check. Required if standalone, ignored if - part of service definition. - ttl: - type: str - description: - - Checks can be registered with a ttl instead of a I(script) and I(interval) - this means that the service will check in with the agent before the - ttl expires. If it doesn't the check will be considered failed. - Required if registering a check and the script an interval are missing - Similar to the interval this is a number with a C(s) or C(m) suffix to - signify the units of seconds or minutes e.g C(15s) or C(1m). - If no suffix is supplied C(s) will be used by default, e.g. C(10) will be C(10s). - tcp: - type: str - description: - - Checks can be registered with a TCP port. This means that consul - will check if the connection attempt to that port is successful (that is, the port is currently accepting connections). - The format is C(host:port), for example C(localhost:80). - - Requires I(interval) to be provided. - version_added: '1.3.0' - http: - type: str - description: - - Checks can be registered with an HTTP endpoint. This means that consul - will check that the http endpoint returns a successful HTTP status. - - Requires I(interval) to be provided. 
- timeout: - type: str - description: - - A custom HTTP check timeout. The consul default is 10 seconds. - Similar to the interval this is a number with a C(s) or C(m) suffix to - signify the units of seconds or minutes, e.g. C(15s) or C(1m). - If no suffix is supplied C(s) will be used by default, e.g. C(10) will be C(10s). - token: - type: str - description: - - The token key identifying an ACL rule set. May be required to register services. - ack_params_state_absent: - type: bool - description: - - Disable deprecation warning when using parameters incompatible with I(state=absent). -''' + state: + type: str + description: + - Register or deregister the Consul service, defaults to present. + default: present + choices: ['present', 'absent'] + service_name: + type: str + description: + - Unique name for the service on a node, must be unique per node, required if registering a service. May be omitted + if registering a node level check. + service_id: + type: str + description: + - The ID for the service, must be unique per node. If O(state=absent), defaults to the service name if supplied. + host: + type: str + description: + - Host of the Consul agent defaults to localhost. + default: localhost + port: + type: int + description: + - The port on which the Consul agent is running. + default: 8500 + scheme: + type: str + description: + - The protocol scheme on which the Consul agent is running. + default: http + validate_certs: + description: + - Whether to verify the TLS certificate of the Consul agent. + type: bool + default: true + notes: + type: str + description: + - Notes to attach to check when registering it. + service_port: + type: int + description: + - The port on which the service is listening. Can optionally be supplied for registration of a service, that is if O(service_name) + or O(service_id) is set. + service_address: + type: str + description: + - The address to advertise that the service is listening on. 
This value is passed as the C(address) parameter to Consul's + C(/v1/agent/service/register) API method, so refer to the Consul API documentation for further details. + tags: + type: list + elements: str + description: + - Tags that are attached to the service registration. + script: + type: str + description: + - The script/command that is run periodically to check the health of the service. + - Requires O(interval) to be provided. + - Mutually exclusive with O(ttl), O(tcp) and O(http). + interval: + type: str + description: + - The interval at which the service check is run. This is a number with a V(s) or V(m) suffix to signify the units of + seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) is used by default, for example V(10) + is V(10s). + - Required if one of the parameters O(script), O(http), or O(tcp) is specified. + check_id: + type: str + description: + - An ID for the service check. If O(state=absent), defaults to O(check_name). Ignored if part of a service definition. + check_name: + type: str + description: + - Name for the service check. Required if standalone, ignored if part of service definition. + check_node: + description: + - Node name. + type: str + check_host: + description: + - Host name. + type: str + ttl: + type: str + description: + - Checks can be registered with a TTL instead of a O(script) and O(interval) this means that the service checks in with + the agent before the TTL expires. If it does not the check is considered failed. Required if registering a check and + the script an interval are missing Similar to the interval this is a number with a V(s) or V(m) suffix to signify + the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) is used by default, for + example V(10) is equivalent to V(10s). + - Mutually exclusive with O(script), O(tcp) and O(http). + tcp: + type: str + description: + - Checks can be registered with a TCP port. 
This means that Consul checks if the connection attempt to that port is + successful (that is, the port is currently accepting connections). The format is V(host:port), for example V(localhost:80). + - Requires O(interval) to be provided. + - Mutually exclusive with O(script), O(ttl) and O(http). + version_added: '1.3.0' + http: + type: str + description: + - Checks can be registered with an HTTP endpoint. This means that Consul checks that the http endpoint returns a successful + HTTP status. + - Requires O(interval) to be provided. + - Mutually exclusive with O(script), O(ttl) and O(tcp). + timeout: + type: str + description: + - A custom HTTP check timeout. The Consul default is 10 seconds. Similar to the interval this is a number with a V(s) + or V(m) suffix to signify the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) + is used by default, for example V(10) is equivalent to V(10s). + token: + type: str + description: + - The token key identifying an ACL rule set. May be required to register services. 
+""" -EXAMPLES = ''' -- name: Register nginx service with the local consul agent +EXAMPLES = r""" +- name: Register nginx service with the local Consul agent community.general.consul: service_name: nginx service_port: 80 @@ -222,7 +220,7 @@ EXAMPLES = ''' service_id: nginx interval: 60s http: http://localhost:80/morestatus -''' +""" try: import consul @@ -370,13 +368,7 @@ def get_service_by_id_or_name(consul_api, service_id_or_name): def parse_check(module): - _checks = [module.params[p] for p in ('script', 'ttl', 'tcp', 'http') if module.params[p]] - - if len(_checks) > 1: - module.fail_json( - msg='checks are either script, tcp, http or ttl driven, supplying more than one does not make sense') - - if module.params['check_id'] or _checks: + if module.params['check_id'] or any(module.params[p] is not None for p in ('script', 'ttl', 'tcp', 'http')): return ConsulCheck( module.params['check_id'], module.params['check_name'], @@ -494,15 +486,9 @@ class ConsulCheck(object): self.check = consul.Check.ttl(self.ttl) if http: - if interval is None: - raise Exception('http check must specify interval') - self.check = consul.Check.http(http, self.interval, self.timeout) if tcp: - if interval is None: - raise Exception('tcp check must specify interval') - regex = r"(?P.*):(?P(?:[0-9]+))$" match = re.match(regex, tcp) @@ -561,7 +547,7 @@ class ConsulCheck(object): def test_dependencies(module): if not python_consul_installed: - module.fail_json(msg="python-consul required for this module. see https://python-consul.readthedocs.io/en/latest/#installation") + module.fail_json(msg="py-consul required for this module. 
see https://github.com/criteo/py-consul?tab=readme-ov-file#installation") def main(): @@ -589,30 +575,28 @@ def main(): timeout=dict(type='str'), tags=dict(type='list', elements='str'), token=dict(no_log=True), - ack_params_state_absent=dict(type='bool'), ), + mutually_exclusive=[ + ('script', 'ttl', 'tcp', 'http'), + ], required_if=[ ('state', 'present', ['service_name']), ('state', 'absent', ['service_id', 'service_name', 'check_id', 'check_name'], True), ], + required_by={ + 'script': 'interval', + 'http': 'interval', + 'tcp': 'interval', + }, supports_check_mode=False, ) p = module.params test_dependencies(module) - if p['state'] == 'absent' and any(p[x] for x in ['script', 'ttl', 'tcp', 'http', 'interval']) and not p['ack_params_state_absent']: - module.deprecate( - "The use of parameters 'script', 'ttl', 'tcp', 'http', 'interval' along with 'state=absent' is deprecated. " - "In community.general 8.0.0 their use will become an error. " - "To suppress this deprecation notice, set parameter ack_params_state_absent=true.", - version="8.0.0", - collection_name="community.general", + if p['state'] == 'absent' and any(p[x] for x in ['script', 'ttl', 'tcp', 'http', 'interval']): + module.fail_json( + msg="The use of parameters 'script', 'ttl', 'tcp', 'http', 'interval' along with 'state=absent' is no longer allowed." 
) - # When reaching c.g 8.0.0: - # - Replace the deprecation with a fail_json(), remove the "ack_params_state_absent" condition from the "if" - # - Add mutually_exclusive for ('script', 'ttl', 'tcp', 'http'), then remove that validation from parse_check() - # - Add required_by {'script': 'interval', 'http': 'interval', 'tcp': 'interval'}, then remove checks for 'interval' in ConsulCheck.__init__() - # - Deprecate the parameter ack_params_state_absent try: register_with_consul(module) diff --git a/plugins/modules/consul_acl.py b/plugins/modules/consul_acl.py deleted file mode 100644 index b9f14db164..0000000000 --- a/plugins/modules/consul_acl.py +++ /dev/null @@ -1,684 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015, Steve Gargan -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' -module: consul_acl -short_description: Manipulate Consul ACL keys and rules -description: - - Allows the addition, modification and deletion of ACL keys and associated - rules in a consul cluster via the agent. For more details on using and - configuring ACLs, see https://www.consul.io/docs/guides/acl.html. 
-author: - - Steve Gargan (@sgargan) - - Colin Nolan (@colin-nolan) -options: - mgmt_token: - description: - - a management token is required to manipulate the acl lists - required: true - type: str - state: - description: - - whether the ACL pair should be present or absent - required: false - choices: ['present', 'absent'] - default: present - type: str - token_type: - description: - - the type of token that should be created - choices: ['client', 'management'] - default: client - type: str - name: - description: - - the name that should be associated with the acl key, this is opaque - to Consul - required: false - type: str - token: - description: - - the token key identifying an ACL rule set. If generated by consul - this will be a UUID - required: false - type: str - rules: - type: list - elements: dict - description: - - rules that should be associated with a given token - required: false - host: - description: - - host of the consul agent defaults to localhost - required: false - default: localhost - type: str - port: - type: int - description: - - the port on which the consul agent is running - required: false - default: 8500 - scheme: - description: - - the protocol scheme on which the consul agent is running - required: false - default: http - type: str - validate_certs: - type: bool - description: - - whether to verify the tls certificate of the consul agent - required: false - default: true -requirements: - - python-consul - - pyhcl - - requests -''' - -EXAMPLES = """ -- name: Create an ACL with rules - community.general.consul_acl: - host: consul1.example.com - mgmt_token: some_management_acl - name: Foo access - rules: - - key: "foo" - policy: read - - key: "private/foo" - policy: deny - -- name: Create an ACL with a specific token - community.general.consul_acl: - host: consul1.example.com - mgmt_token: some_management_acl - name: Foo access - token: my-token - rules: - - key: "foo" - policy: read - -- name: Update the rules associated to an ACL 
token - community.general.consul_acl: - host: consul1.example.com - mgmt_token: some_management_acl - name: Foo access - token: some_client_token - rules: - - event: "bbq" - policy: write - - key: "foo" - policy: read - - key: "private" - policy: deny - - keyring: write - - node: "hgs4" - policy: write - - operator: read - - query: "" - policy: write - - service: "consul" - policy: write - - session: "standup" - policy: write - -- name: Remove a token - community.general.consul_acl: - host: consul1.example.com - mgmt_token: some_management_acl - token: 172bd5c8-9fe9-11e4-b1b0-3c15c2c9fd5e - state: absent -""" - -RETURN = """ -token: - description: the token associated to the ACL (the ACL's ID) - returned: success - type: str - sample: a2ec332f-04cf-6fba-e8b8-acf62444d3da -rules: - description: the HCL JSON representation of the rules associated to the ACL, in the format described in the - Consul documentation (https://www.consul.io/docs/guides/acl.html#rule-specification). - returned: I(status) == "present" - type: dict - sample: { - "key": { - "foo": { - "policy": "write" - }, - "bar": { - "policy": "deny" - } - } - } -operation: - description: the operation performed on the ACL - returned: changed - type: str - sample: update -""" - - -try: - import consul - python_consul_installed = True -except ImportError: - python_consul_installed = False - -try: - import hcl - pyhcl_installed = True -except ImportError: - pyhcl_installed = False - -try: - from requests.exceptions import ConnectionError - has_requests = True -except ImportError: - has_requests = False - -from collections import defaultdict -from ansible.module_utils.basic import to_text, AnsibleModule - - -RULE_SCOPES = [ - "agent", - "agent_prefix", - "event", - "event_prefix", - "key", - "key_prefix", - "keyring", - "node", - "node_prefix", - "operator", - "query", - "query_prefix", - "service", - "service_prefix", - "session", - "session_prefix", -] - -MANAGEMENT_PARAMETER_NAME = "mgmt_token" 
-HOST_PARAMETER_NAME = "host" -SCHEME_PARAMETER_NAME = "scheme" -VALIDATE_CERTS_PARAMETER_NAME = "validate_certs" -NAME_PARAMETER_NAME = "name" -PORT_PARAMETER_NAME = "port" -RULES_PARAMETER_NAME = "rules" -STATE_PARAMETER_NAME = "state" -TOKEN_PARAMETER_NAME = "token" -TOKEN_TYPE_PARAMETER_NAME = "token_type" - -PRESENT_STATE_VALUE = "present" -ABSENT_STATE_VALUE = "absent" - -CLIENT_TOKEN_TYPE_VALUE = "client" -MANAGEMENT_TOKEN_TYPE_VALUE = "management" - -REMOVE_OPERATION = "remove" -UPDATE_OPERATION = "update" -CREATE_OPERATION = "create" - -_POLICY_JSON_PROPERTY = "policy" -_RULES_JSON_PROPERTY = "Rules" -_TOKEN_JSON_PROPERTY = "ID" -_TOKEN_TYPE_JSON_PROPERTY = "Type" -_NAME_JSON_PROPERTY = "Name" -_POLICY_YML_PROPERTY = "policy" -_POLICY_HCL_PROPERTY = "policy" - -_ARGUMENT_SPEC = { - MANAGEMENT_PARAMETER_NAME: dict(required=True, no_log=True), - HOST_PARAMETER_NAME: dict(default='localhost'), - SCHEME_PARAMETER_NAME: dict(default='http'), - VALIDATE_CERTS_PARAMETER_NAME: dict(type='bool', default=True), - NAME_PARAMETER_NAME: dict(), - PORT_PARAMETER_NAME: dict(default=8500, type='int'), - RULES_PARAMETER_NAME: dict(type='list', elements='dict'), - STATE_PARAMETER_NAME: dict(default=PRESENT_STATE_VALUE, choices=[PRESENT_STATE_VALUE, ABSENT_STATE_VALUE]), - TOKEN_PARAMETER_NAME: dict(no_log=False), - TOKEN_TYPE_PARAMETER_NAME: dict(choices=[CLIENT_TOKEN_TYPE_VALUE, MANAGEMENT_TOKEN_TYPE_VALUE], - default=CLIENT_TOKEN_TYPE_VALUE) -} - - -def set_acl(consul_client, configuration): - """ - Sets an ACL based on the given configuration. 
- :param consul_client: the consul client - :param configuration: the run configuration - :return: the output of setting the ACL - """ - acls_as_json = decode_acls_as_json(consul_client.acl.list()) - existing_acls_mapped_by_name = dict((acl.name, acl) for acl in acls_as_json if acl.name is not None) - existing_acls_mapped_by_token = dict((acl.token, acl) for acl in acls_as_json) - if None in existing_acls_mapped_by_token: - raise AssertionError("expecting ACL list to be associated to a token: %s" % - existing_acls_mapped_by_token[None]) - - if configuration.token is None and configuration.name and configuration.name in existing_acls_mapped_by_name: - # No token but name given so can get token from name - configuration.token = existing_acls_mapped_by_name[configuration.name].token - - if configuration.token and configuration.token in existing_acls_mapped_by_token: - return update_acl(consul_client, configuration) - else: - if configuration.token in existing_acls_mapped_by_token: - raise AssertionError() - if configuration.name in existing_acls_mapped_by_name: - raise AssertionError() - return create_acl(consul_client, configuration) - - -def update_acl(consul_client, configuration): - """ - Updates an ACL. 
- :param consul_client: the consul client - :param configuration: the run configuration - :return: the output of the update - """ - existing_acl = load_acl_with_token(consul_client, configuration.token) - changed = existing_acl.rules != configuration.rules - - if changed: - name = configuration.name if configuration.name is not None else existing_acl.name - rules_as_hcl = encode_rules_as_hcl_string(configuration.rules) - updated_token = consul_client.acl.update( - configuration.token, name=name, type=configuration.token_type, rules=rules_as_hcl) - if updated_token != configuration.token: - raise AssertionError() - - return Output(changed=changed, token=configuration.token, rules=configuration.rules, operation=UPDATE_OPERATION) - - -def create_acl(consul_client, configuration): - """ - Creates an ACL. - :param consul_client: the consul client - :param configuration: the run configuration - :return: the output of the creation - """ - rules_as_hcl = encode_rules_as_hcl_string(configuration.rules) if len(configuration.rules) > 0 else None - token = consul_client.acl.create( - name=configuration.name, type=configuration.token_type, rules=rules_as_hcl, acl_id=configuration.token) - rules = configuration.rules - return Output(changed=True, token=token, rules=rules, operation=CREATE_OPERATION) - - -def remove_acl(consul, configuration): - """ - Removes an ACL. - :param consul: the consul client - :param configuration: the run configuration - :return: the output of the removal - """ - token = configuration.token - changed = consul.acl.info(token) is not None - if changed: - consul.acl.destroy(token) - return Output(changed=changed, token=token, operation=REMOVE_OPERATION) - - -def load_acl_with_token(consul, token): - """ - Loads the ACL with the given token (token == rule ID). 
- :param consul: the consul client - :param token: the ACL "token"/ID (not name) - :return: the ACL associated to the given token - :exception ConsulACLTokenNotFoundException: raised if the given token does not exist - """ - acl_as_json = consul.acl.info(token) - if acl_as_json is None: - raise ConsulACLNotFoundException(token) - return decode_acl_as_json(acl_as_json) - - -def encode_rules_as_hcl_string(rules): - """ - Converts the given rules into the equivalent HCL (string) representation. - :param rules: the rules - :return: the equivalent HCL (string) representation of the rules. Will be None if there is no rules (see internal - note for justification) - """ - if len(rules) == 0: - # Note: empty string is not valid HCL according to `hcl.load` however, the ACL `Rule` property will be an empty - # string if there is no rules... - return None - rules_as_hcl = "" - for rule in rules: - rules_as_hcl += encode_rule_as_hcl_string(rule) - return rules_as_hcl - - -def encode_rule_as_hcl_string(rule): - """ - Converts the given rule into the equivalent HCL (string) representation. - :param rule: the rule - :return: the equivalent HCL (string) representation of the rule - """ - if rule.pattern is not None: - return '%s "%s" {\n %s = "%s"\n}\n' % (rule.scope, rule.pattern, _POLICY_HCL_PROPERTY, rule.policy) - else: - return '%s = "%s"\n' % (rule.scope, rule.policy) - - -def decode_rules_as_hcl_string(rules_as_hcl): - """ - Converts the given HCL (string) representation of rules into a list of rule domain models. - :param rules_as_hcl: the HCL (string) representation of a collection of rules - :return: the equivalent domain model to the given rules - """ - rules_as_hcl = to_text(rules_as_hcl) - rules_as_json = hcl.loads(rules_as_hcl) - return decode_rules_as_json(rules_as_json) - - -def decode_rules_as_json(rules_as_json): - """ - Converts the given JSON representation of rules into a list of rule domain models. 
- :param rules_as_json: the JSON representation of a collection of rules - :return: the equivalent domain model to the given rules - """ - rules = RuleCollection() - for scope in rules_as_json: - if not isinstance(rules_as_json[scope], dict): - rules.add(Rule(scope, rules_as_json[scope])) - else: - for pattern, policy in rules_as_json[scope].items(): - rules.add(Rule(scope, policy[_POLICY_JSON_PROPERTY], pattern)) - return rules - - -def encode_rules_as_json(rules): - """ - Converts the given rules into the equivalent JSON representation according to the documentation: - https://www.consul.io/docs/guides/acl.html#rule-specification. - :param rules: the rules - :return: JSON representation of the given rules - """ - rules_as_json = defaultdict(dict) - for rule in rules: - if rule.pattern is not None: - if rule.pattern in rules_as_json[rule.scope]: - raise AssertionError() - rules_as_json[rule.scope][rule.pattern] = { - _POLICY_JSON_PROPERTY: rule.policy - } - else: - if rule.scope in rules_as_json: - raise AssertionError() - rules_as_json[rule.scope] = rule.policy - return rules_as_json - - -def decode_rules_as_yml(rules_as_yml): - """ - Converts the given YAML representation of rules into a list of rule domain models. - :param rules_as_yml: the YAML representation of a collection of rules - :return: the equivalent domain model to the given rules - """ - rules = RuleCollection() - if rules_as_yml: - for rule_as_yml in rules_as_yml: - rule_added = False - for scope in RULE_SCOPES: - if scope in rule_as_yml: - if rule_as_yml[scope] is None: - raise ValueError("Rule for '%s' does not have a value associated to the scope" % scope) - policy = rule_as_yml[_POLICY_YML_PROPERTY] if _POLICY_YML_PROPERTY in rule_as_yml \ - else rule_as_yml[scope] - pattern = rule_as_yml[scope] if _POLICY_YML_PROPERTY in rule_as_yml else None - rules.add(Rule(scope, policy, pattern)) - rule_added = True - break - if not rule_added: - raise ValueError("A rule requires one of %s and a policy." 
% ('/'.join(RULE_SCOPES))) - return rules - - -def decode_acl_as_json(acl_as_json): - """ - Converts the given JSON representation of an ACL into the equivalent domain model. - :param acl_as_json: the JSON representation of an ACL - :return: the equivalent domain model to the given ACL - """ - rules_as_hcl = acl_as_json[_RULES_JSON_PROPERTY] - rules = decode_rules_as_hcl_string(acl_as_json[_RULES_JSON_PROPERTY]) if rules_as_hcl.strip() != "" \ - else RuleCollection() - return ACL( - rules=rules, - token_type=acl_as_json[_TOKEN_TYPE_JSON_PROPERTY], - token=acl_as_json[_TOKEN_JSON_PROPERTY], - name=acl_as_json[_NAME_JSON_PROPERTY] - ) - - -def decode_acls_as_json(acls_as_json): - """ - Converts the given JSON representation of ACLs into a list of ACL domain models. - :param acls_as_json: the JSON representation of a collection of ACLs - :return: list of equivalent domain models for the given ACLs (order not guaranteed to be the same) - """ - return [decode_acl_as_json(acl_as_json) for acl_as_json in acls_as_json] - - -class ConsulACLNotFoundException(Exception): - """ - Exception raised if an ACL with is not found. - """ - - -class Configuration: - """ - Configuration for this module. - """ - - def __init__(self, management_token=None, host=None, scheme=None, validate_certs=None, name=None, port=None, - rules=None, state=None, token=None, token_type=None): - self.management_token = management_token # type: str - self.host = host # type: str - self.scheme = scheme # type: str - self.validate_certs = validate_certs # type: bool - self.name = name # type: str - self.port = port # type: int - self.rules = rules # type: RuleCollection - self.state = state # type: str - self.token = token # type: str - self.token_type = token_type # type: str - - -class Output: - """ - Output of an action of this module. 
- """ - - def __init__(self, changed=None, token=None, rules=None, operation=None): - self.changed = changed # type: bool - self.token = token # type: str - self.rules = rules # type: RuleCollection - self.operation = operation # type: str - - -class ACL: - """ - Consul ACL. See: https://www.consul.io/docs/guides/acl.html. - """ - - def __init__(self, rules, token_type, token, name): - self.rules = rules - self.token_type = token_type - self.token = token - self.name = name - - def __eq__(self, other): - return other \ - and isinstance(other, self.__class__) \ - and self.rules == other.rules \ - and self.token_type == other.token_type \ - and self.token == other.token \ - and self.name == other.name - - def __hash__(self): - return hash(self.rules) ^ hash(self.token_type) ^ hash(self.token) ^ hash(self.name) - - -class Rule: - """ - ACL rule. See: https://www.consul.io/docs/guides/acl.html#acl-rules-and-scope. - """ - - def __init__(self, scope, policy, pattern=None): - self.scope = scope - self.policy = policy - self.pattern = pattern - - def __eq__(self, other): - return isinstance(other, self.__class__) \ - and self.scope == other.scope \ - and self.policy == other.policy \ - and self.pattern == other.pattern - - def __ne__(self, other): - return not self.__eq__(other) - - def __hash__(self): - return (hash(self.scope) ^ hash(self.policy)) ^ hash(self.pattern) - - def __str__(self): - return encode_rule_as_hcl_string(self) - - -class RuleCollection: - """ - Collection of ACL rules, which are part of a Consul ACL. 
- """ - - def __init__(self): - self._rules = {} - for scope in RULE_SCOPES: - self._rules[scope] = {} - - def __iter__(self): - all_rules = [] - for scope, pattern_keyed_rules in self._rules.items(): - for pattern, rule in pattern_keyed_rules.items(): - all_rules.append(rule) - return iter(all_rules) - - def __len__(self): - count = 0 - for scope in RULE_SCOPES: - count += len(self._rules[scope]) - return count - - def __eq__(self, other): - return isinstance(other, self.__class__) \ - and set(self) == set(other) - - def __ne__(self, other): - return not self.__eq__(other) - - def __str__(self): - return encode_rules_as_hcl_string(self) - - def add(self, rule): - """ - Adds the given rule to this collection. - :param rule: model of a rule - :raises ValueError: raised if there already exists a rule for a given scope and pattern - """ - if rule.pattern in self._rules[rule.scope]: - patten_info = " and pattern '%s'" % rule.pattern if rule.pattern is not None else "" - raise ValueError("Duplicate rule for scope '%s'%s" % (rule.scope, patten_info)) - self._rules[rule.scope][rule.pattern] = rule - - -def get_consul_client(configuration): - """ - Gets a Consul client for the given configuration. - - Does not check if the Consul client can connect. - :param configuration: the run configuration - :return: Consul client - """ - token = configuration.management_token - if token is None: - token = configuration.token - if token is None: - raise AssertionError("Expecting the management token to always be set") - return consul.Consul(host=configuration.host, port=configuration.port, scheme=configuration.scheme, - verify=configuration.validate_certs, token=token) - - -def check_dependencies(): - """ - Checks that the required dependencies have been imported. - :exception ImportError: if it is detected that any of the required dependencies have not been imported - """ - if not python_consul_installed: - raise ImportError("python-consul required for this module. 
" - "See: https://python-consul.readthedocs.io/en/latest/#installation") - - if not pyhcl_installed: - raise ImportError("pyhcl required for this module. " - "See: https://pypi.org/project/pyhcl/") - - if not has_requests: - raise ImportError("requests required for this module. See https://pypi.org/project/requests/") - - -def main(): - """ - Main method. - """ - module = AnsibleModule(_ARGUMENT_SPEC, supports_check_mode=False) - - try: - check_dependencies() - except ImportError as e: - module.fail_json(msg=str(e)) - - configuration = Configuration( - management_token=module.params.get(MANAGEMENT_PARAMETER_NAME), - host=module.params.get(HOST_PARAMETER_NAME), - scheme=module.params.get(SCHEME_PARAMETER_NAME), - validate_certs=module.params.get(VALIDATE_CERTS_PARAMETER_NAME), - name=module.params.get(NAME_PARAMETER_NAME), - port=module.params.get(PORT_PARAMETER_NAME), - rules=decode_rules_as_yml(module.params.get(RULES_PARAMETER_NAME)), - state=module.params.get(STATE_PARAMETER_NAME), - token=module.params.get(TOKEN_PARAMETER_NAME), - token_type=module.params.get(TOKEN_TYPE_PARAMETER_NAME) - ) - consul_client = get_consul_client(configuration) - - try: - if configuration.state == PRESENT_STATE_VALUE: - output = set_acl(consul_client, configuration) - else: - output = remove_acl(consul_client, configuration) - except ConnectionError as e: - module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( - configuration.host, configuration.port, str(e))) - raise - - return_values = dict(changed=output.changed, token=output.token, operation=output.operation) - if output.rules is not None: - return_values["rules"] = encode_rules_as_json(output.rules) - module.exit_json(**return_values) - - -if __name__ == "__main__": - main() diff --git a/plugins/modules/consul_acl_bootstrap.py b/plugins/modules/consul_acl_bootstrap.py new file mode 100644 index 0000000000..d7d474e9c6 --- /dev/null +++ b/plugins/modules/consul_acl_bootstrap.py @@ -0,0 +1,104 @@ 
+#!/usr/bin/python +# +# Copyright (c) 2024, Florian Apolloner (@apollo13) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: consul_acl_bootstrap +short_description: Bootstrap ACLs in Consul +version_added: 8.3.0 +description: + - Allows bootstrapping of ACLs in a Consul cluster, see U(https://developer.hashicorp.com/consul/api-docs/acl#bootstrap-acls) + for details. +author: + - Florian Apolloner (@apollo13) +extends_documentation_fragment: + - community.general.consul + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + state: + description: + - Whether the token should be present or absent. + choices: ['present', 'bootstrapped'] + default: present + type: str + bootstrap_secret: + description: + - The secret to be used as secret ID for the initial token. + - Needs to be an UUID. + type: str +""" + +EXAMPLES = r""" +- name: Bootstrap the ACL system + community.general.consul_acl_bootstrap: + bootstrap_secret: 22eaeed1-bdbd-4651-724e-42ae6c43e387 +""" + +RETURN = r""" +result: + description: + - The bootstrap result as returned by the Consul HTTP API. + - B(Note:) If O(bootstrap_secret) has been specified the C(SecretID) and C(ID) do not contain the secret but C(VALUE_SPECIFIED_IN_NO_LOG_PARAMETER). + If you pass O(bootstrap_secret), make sure your playbook/role does not depend on this return value! 
+ returned: changed + type: dict + sample: + AccessorID: 834a5881-10a9-a45b-f63c-490e28743557 + CreateIndex: 25 + CreateTime: '2024-01-21T20:26:27.114612038+01:00' + Description: Bootstrap Token (Global Management) + Hash: X2AgaFhnQGRhSSF/h0m6qpX1wj/HJWbyXcxkEM/5GrY= + ID: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER + Local: false + ModifyIndex: 25 + Policies: + - ID: 00000000-0000-0000-0000-000000000001 + Name: global-management + SecretID: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.consul import ( + AUTH_ARGUMENTS_SPEC, + RequestError, + _ConsulModule, +) + +_ARGUMENT_SPEC = { + "state": dict(type="str", choices=["present", "bootstrapped"], default="present"), + "bootstrap_secret": dict(type="str", no_log=True), +} +_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) +_ARGUMENT_SPEC.pop("token") + + +def main(): + module = AnsibleModule(_ARGUMENT_SPEC) + consul_module = _ConsulModule(module) + + data = {} + if "bootstrap_secret" in module.params: + data["BootstrapSecret"] = module.params["bootstrap_secret"] + + try: + response = consul_module.put("acl/bootstrap", data=data) + except RequestError as e: + if e.status == 403 and b"ACL bootstrap no longer allowed" in e.response_data: + return module.exit_json(changed=False) + raise + else: + return module.exit_json(changed=True, result=response) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/consul_agent_check.py b/plugins/modules/consul_agent_check.py new file mode 100644 index 0000000000..e241c8ddf4 --- /dev/null +++ b/plugins/modules/consul_agent_check.py @@ -0,0 +1,244 @@ +#!/usr/bin/python + +# Copyright (c) 2024, Michael Ilg +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: consul_agent_check +short_description: Add, modify, and 
delete checks within a Consul cluster +version_added: 9.1.0 +description: + - Allows the addition, modification and deletion of checks in a Consul cluster using the agent. For more details on using + and configuring Checks, see U(https://developer.hashicorp.com/consul/api-docs/agent/check). + - Currently, there is no complete way to retrieve the script, interval or TTL metadata for a registered check. Without this + metadata it is not possible to tell if the data supplied with ansible represents a change to a check. As a result, the + module does not attempt to determine changes and it always reports a changed occurred. An API method is planned to supply + this metadata so at that stage change management is to be added. +author: + - Michael Ilg (@Ilgmi) +extends_documentation_fragment: + - community.general.consul + - community.general.consul.actiongroup_consul + - community.general.consul.token + - community.general.attributes +attributes: + check_mode: + support: full + details: + - The result is the object as it is defined in the module options and not the object structure of the Consul API. For + a better overview of what the object structure looks like, take a look at U(https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks). + diff_mode: + support: partial + details: + - In check mode the diff shows the object as it is defined in the module options and not the object structure of the + Consul API. +options: + state: + description: + - Whether the check should be present or absent. + choices: ['present', 'absent'] + default: present + type: str + name: + description: + - Required name for the service check. + type: str + id: + description: + - Specifies a unique ID for this check on the node. This defaults to the O(name) parameter, but it may be necessary + to provide an ID for uniqueness. This value is returned in the response as V(CheckId). + type: str + interval: + description: + - The interval at which the service check is run. 
This is a number with a V(s) or V(m) suffix to signify the units of + seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) is used by default, for example V(10) + is equivalent to V(10s). + - Required if one of the parameters O(args), O(http), or O(tcp) is specified. + type: str + notes: + description: + - Notes to attach to check when registering it. + type: str + args: + description: + - Specifies command arguments to run to update the status of the check. + - Requires O(interval) to be provided. + - Mutually exclusive with O(ttl), O(tcp) and O(http). + type: list + elements: str + ttl: + description: + - Checks can be registered with a TTL instead of a O(args) and O(interval) this means that the service checks in with + the agent before the TTL expires. If it does not the check is considered failed. Required if registering a check and + the script an interval are missing Similar to the interval this is a number with a V(s) or V(m) suffix to signify + the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) is used by default, for + example V(10) is equivalent to V(10s). + - Mutually exclusive with O(args), O(tcp) and O(http). + type: str + tcp: + description: + - Checks can be registered with a TCP port. This means that Consul will check if the connection attempt to that port + is successful (that is, the port is currently accepting connections). The format is V(host:port), for example V(localhost:80). + - Requires O(interval) to be provided. + - Mutually exclusive with O(args), O(ttl) and O(http). + type: str + version_added: '1.3.0' + http: + description: + - Checks can be registered with an HTTP endpoint. This means that Consul checks that the HTTP endpoint returns a successful + HTTP status. + - Requires O(interval) to be provided. + - Mutually exclusive with O(args), O(ttl) and O(tcp). + type: str + timeout: + description: + - A custom HTTP check timeout. The Consul default is 10 seconds. 
Similar to the interval this is a number with a V(s) + or V(m) suffix to signify the units of seconds or minutes, for example V(15s) or V(1m). If no suffix is supplied V(s) + is used by default, for example V(10) is equivalent to V(10s). + type: str + service_id: + description: + - The ID for the service, must be unique per node. If O(state=absent), defaults to the service name if supplied. + type: str +""" + +EXAMPLES = r""" +- name: Register tcp check for service 'nginx' + community.general.consul_agent_check: + name: nginx_tcp_check + service_id: nginx + interval: 60s + tcp: localhost:80 + notes: "Nginx Check" + +- name: Register http check for service 'nginx' + community.general.consul_agent_check: + name: nginx_http_check + service_id: nginx + interval: 60s + http: http://localhost:80/status + notes: "Nginx Check" + +- name: Remove check for service 'nginx' + community.general.consul_agent_check: + state: absent + id: nginx_http_check + service_id: "{{ nginx_service.ID }}" +""" + +RETURN = r""" +check: + description: The check as returned by the Consul HTTP API. + returned: always + type: dict + sample: + CheckID: nginx_check + ServiceID: nginx + Interval: 30s + Type: http + Notes: Nginx Check +operation: + description: The operation performed. 
+ returned: changed + type: str + sample: update +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.consul import ( + AUTH_ARGUMENTS_SPEC, + OPERATION_CREATE, + OPERATION_UPDATE, + OPERATION_DELETE, + OPERATION_READ, + _ConsulModule, + validate_check, +) + +_ARGUMENT_SPEC = { + "state": dict(default="present", choices=["present", "absent"]), + "name": dict(type='str'), + "id": dict(type='str'), + "interval": dict(type='str'), + "notes": dict(type='str'), + "args": dict(type='list', elements='str'), + "http": dict(type='str'), + "tcp": dict(type='str'), + "ttl": dict(type='str'), + "timeout": dict(type='str'), + "service_id": dict(type='str'), +} + +_MUTUALLY_EXCLUSIVE = [ + ('args', 'ttl', 'tcp', 'http'), +] + +_REQUIRED_IF = [ + ('state', 'present', ['name']), + ('state', 'absent', ('id', 'name'), True), +] + +_REQUIRED_BY = { + 'args': 'interval', + 'http': 'interval', + 'tcp': 'interval', +} + +_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) + + +class ConsulAgentCheckModule(_ConsulModule): + api_endpoint = "agent/check" + result_key = "check" + unique_identifiers = ["id", "name"] + operational_attributes = {"Node", "CheckID", "Output", "ServiceName", "ServiceTags", + "Status", "Type", "ExposedPort", "Definition"} + + def endpoint_url(self, operation, identifier=None): + if operation == OPERATION_READ: + return "agent/checks" + if operation in [OPERATION_CREATE, OPERATION_UPDATE]: + return "/".join([self.api_endpoint, "register"]) + if operation == OPERATION_DELETE: + return "/".join([self.api_endpoint, "deregister", identifier]) + + return super(ConsulAgentCheckModule, self).endpoint_url(operation, identifier) + + def read_object(self): + url = self.endpoint_url(OPERATION_READ) + checks = self.get(url) + identifier = self.id_from_obj(self.params) + if identifier in checks: + return checks[identifier] + return None + + def prepare_object(self, existing, obj): + existing = 
super(ConsulAgentCheckModule, self).prepare_object(existing, obj) + validate_check(existing) + return existing + + def delete_object(self, obj): + if not self._module.check_mode: + self.put(self.endpoint_url(OPERATION_DELETE, obj.get("CheckID"))) + return {} + + +def main(): + module = AnsibleModule( + _ARGUMENT_SPEC, + mutually_exclusive=_MUTUALLY_EXCLUSIVE, + required_if=_REQUIRED_IF, + required_by=_REQUIRED_BY, + supports_check_mode=True, + ) + + consul_module = ConsulAgentCheckModule(module) + consul_module.execute() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/consul_agent_service.py b/plugins/modules/consul_agent_service.py new file mode 100644 index 0000000000..7d7c94c05a --- /dev/null +++ b/plugins/modules/consul_agent_service.py @@ -0,0 +1,281 @@ +#!/usr/bin/python + +# Copyright (c) 2024, Michael Ilg +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: consul_agent_service +short_description: Add, modify and delete services within a Consul cluster +version_added: 9.1.0 +description: + - Allows the addition, modification and deletion of services in a Consul cluster using the agent. + - There are currently no plans to create services and checks in one. This is because the Consul API does not provide checks + for a service and the checks themselves do not match the module parameters. Therefore, only a service without checks can + be created in this module. +author: + - Michael Ilg (@Ilgmi) +extends_documentation_fragment: + - community.general.consul + - community.general.consul.actiongroup_consul + - community.general.consul.token + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: partial + details: + - In check mode the diff misses operational attributes. 
+options: + state: + description: + - Whether the service should be present or absent. + choices: ['present', 'absent'] + default: present + type: str + name: + description: + - Unique name for the service on a node, must be unique per node, required if registering a service. + type: str + id: + description: + - Specifies a unique ID for this service. This must be unique per agent. This defaults to the O(name) parameter if not + provided. If O(state=absent), defaults to the service name if supplied. + type: str + tags: + description: + - Tags that are attached to the service registration. + type: list + elements: str + address: + description: + - The address to advertise that the service listens on. This value is passed as the C(address) parameter to Consul's + C(/v1/agent/service/register) API method, so refer to the Consul API documentation for further details. + type: str + meta: + description: + - Optional meta data used for filtering. For keys, the characters C(A-Z), C(a-z), C(0-9), C(_), C(-) are allowed. Not + allowed characters are replaced with underscores. + type: dict + service_port: + description: + - The port on which the service is listening. Can optionally be supplied for registration of a service, that is if O(name) + or O(id) is set. + type: int + enable_tag_override: + description: + - Specifies to disable the anti-entropy feature for this service's tags. If C(EnableTagOverride) is set to true then + external agents can update this service in the catalog and modify the tags. + type: bool + default: false + weights: + description: + - Specifies weights for the service. + type: dict + suboptions: + passing: + description: + - Weights for passing. + type: int + default: 1 + warning: + description: + - Weights for warning. 
+ type: int + default: 1 + default: {"passing": 1, "warning": 1} +""" + +EXAMPLES = r""" +- name: Register nginx service with the local Consul agent + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: nginx + service_port: 80 + +- name: Register nginx with a tcp check + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: nginx + service_port: 80 + +- name: Register nginx with an http check + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: nginx + service_port: 80 + +- name: Register external service nginx available at 10.1.5.23 + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: nginx + service_port: 80 + address: 10.1.5.23 + +- name: Register nginx with some service tags + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: nginx + service_port: 80 + tags: + - prod + - webservers + +- name: Register nginx with some service meta + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: nginx + service_port: 80 + meta: + nginx_version: 1.25.3 + +- name: Remove nginx service + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + service_id: nginx + state: absent + +- name: Register celery worker service + community.general.consul_agent_service: + host: consul1.example.com + token: some_management_acl + name: celery-worker + tags: + - prod + - worker +""" + +RETURN = r""" +service: + description: The service as returned by the Consul HTTP API. 
+ returned: always + type: dict + sample: + ID: nginx + Service: nginx + Address: localhost + Port: 80 + Tags: + - http + Meta: + - nginx_version: 1.23.3 + Datacenter: dc1 + Weights: + Passing: 1 + Warning: 1 + ContentHash: 61a245cd985261ac + EnableTagOverride: false +operation: + description: The operation performed. + returned: changed + type: str + sample: update +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.consul import ( + AUTH_ARGUMENTS_SPEC, + OPERATION_CREATE, + OPERATION_UPDATE, + OPERATION_DELETE, + _ConsulModule +) + +_CHECK_MUTUALLY_EXCLUSIVE = [('args', 'ttl', 'tcp', 'http')] +_CHECK_REQUIRED_BY = { + 'args': 'interval', + 'http': 'interval', + 'tcp': 'interval', +} + +_ARGUMENT_SPEC = { + "state": dict(default="present", choices=["present", "absent"]), + "name": dict(type='str'), + "id": dict(type='str'), + "tags": dict(type='list', elements='str'), + "address": dict(type='str'), + "meta": dict(type='dict'), + "service_port": dict(type='int'), + "enable_tag_override": dict(type='bool', default=False), + "weights": dict(type='dict', options=dict( + passing=dict(type='int', default=1, no_log=False), + warning=dict(type='int', default=1) + ), default={"passing": 1, "warning": 1}) +} + +_REQUIRED_IF = [ + ('state', 'present', ['name']), + ('state', 'absent', ('id', 'name'), True), +] + +_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) + + +class ConsulAgentServiceModule(_ConsulModule): + api_endpoint = "agent/service" + result_key = "service" + unique_identifiers = ["id", "name"] + operational_attributes = {"Service", "ContentHash", "Datacenter"} + + def endpoint_url(self, operation, identifier=None): + if operation in [OPERATION_CREATE, OPERATION_UPDATE]: + return "/".join([self.api_endpoint, "register"]) + if operation == OPERATION_DELETE: + return "/".join([self.api_endpoint, "deregister", identifier]) + + return super(ConsulAgentServiceModule, self).endpoint_url(operation, 
identifier) + + def prepare_object(self, existing, obj): + existing = super(ConsulAgentServiceModule, self).prepare_object(existing, obj) + if "ServicePort" in existing: + existing["Port"] = existing.pop("ServicePort") + + if "ID" not in existing: + existing["ID"] = existing["Name"] + + return existing + + def needs_update(self, api_obj, module_obj): + obj = {} + if "Service" in api_obj: + obj["Service"] = api_obj["Service"] + api_obj = self.prepare_object(api_obj, obj) + + if "Name" in module_obj: + module_obj["Service"] = module_obj.pop("Name") + if "ServicePort" in module_obj: + module_obj["Port"] = module_obj.pop("ServicePort") + + return super(ConsulAgentServiceModule, self).needs_update(api_obj, module_obj) + + def delete_object(self, obj): + if not self._module.check_mode: + url = self.endpoint_url(OPERATION_DELETE, self.id_from_obj(obj, camel_case=True)) + self.put(url) + return {} + + +def main(): + module = AnsibleModule( + _ARGUMENT_SPEC, + required_if=_REQUIRED_IF, + supports_check_mode=True, + ) + + consul_module = ConsulAgentServiceModule(module) + consul_module.execute() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/consul_auth_method.py b/plugins/modules/consul_auth_method.py new file mode 100644 index 0000000000..88842662bb --- /dev/null +++ b/plugins/modules/consul_auth_method.py @@ -0,0 +1,204 @@ +#!/usr/bin/python +# +# Copyright (c) 2024, Florian Apolloner (@apollo13) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: consul_auth_method +short_description: Manipulate Consul auth methods +version_added: 8.3.0 +description: + - Allows the addition, modification and deletion of auth methods in a Consul cluster using the agent. For more details on + using and configuring ACLs, see U(https://www.consul.io/docs/guides/acl.html). 
+author: + - Florian Apolloner (@apollo13) +extends_documentation_fragment: + - community.general.consul + - community.general.consul.actiongroup_consul + - community.general.consul.token + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: partial + details: + - In check mode the diff misses operational attributes. +options: + state: + description: + - Whether the token should be present or absent. + choices: ['present', 'absent'] + default: present + type: str + name: + description: + - Specifies a name for the ACL auth method. + - The name can contain alphanumeric characters, dashes C(-), and underscores C(_). + type: str + required: true + type: + description: + - The type of auth method being configured. + - This field is immutable. + - Required when the auth method is created. + type: str + choices: ['kubernetes', 'jwt', 'oidc', 'aws-iam'] + description: + description: + - Free form human readable description of the auth method. + type: str + display_name: + description: + - An optional name to use instead of O(name) when displaying information about this auth method. + type: str + max_token_ttl: + description: + - This specifies the maximum life of any token created by this auth method. + - Can be specified in the form of V(60s) or V(5m) (that is, 60 seconds or 5 minutes, respectively). + type: str + token_locality: + description: + - Defines the kind of token that this auth method should produce. + type: str + choices: ['local', 'global'] + config: + description: + - The raw configuration to use for the chosen auth method. + - Contents vary depending upon the O(type) chosen. + - Required when the auth method is created. 
+ type: dict +""" + +EXAMPLES = r""" +- name: Create an auth method + community.general.consul_auth_method: + name: test + type: jwt + config: + jwt_validation_pubkeys: + - | + -----BEGIN PUBLIC KEY----- + MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu1SU1LfVLPHCozMxH2Mo + 4lgOEePzNm0tRgeLezV6ffAt0gunVTLw7onLRnrq0/IzW7yWR7QkrmBL7jTKEn5u + +qKhbwKfBstIs+bMY2Zkp18gnTxKLxoS2tFczGkPLPgizskuemMghRniWaoLcyeh + kd3qqGElvW/VDL5AaWTg0nLVkjRo9z+40RQzuVaE8AkAFmxZzow3x+VJYKdjykkJ + 0iT9wCS0DRTXu269V264Vf/3jvredZiKRkgwlL9xNAwxXFg0x/XFw005UWVRIkdg + cKWTjpBP2dPwVZ4WWC+9aGVd+Gyn1o0CLelf4rEjGoXbAAEgAqeGUxrcIlbjXfbc + mwIDAQAB + -----END PUBLIC KEY----- + token: "{{ consul_management_token }}" + +- name: Delete auth method + community.general.consul_auth_method: + name: test + state: absent + token: "{{ consul_management_token }}" +""" + +RETURN = r""" +auth_method: + description: The auth method as returned by the Consul HTTP API. + returned: always + type: dict + sample: + Config: + JWTValidationPubkeys: + - |- + -----BEGIN PUBLIC KEY----- + MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu1SU1LfVLPHCozMxH2Mo + 4lgOEePzNm0tRgeLezV6ffAt0gunVTLw7onLRnrq0/IzW7yWR7QkrmBL7jTKEn5u + +qKhbwKfBstIs+bMY2Zkp18gnTxKLxoS2tFczGkPLPgizskuemMghRniWaoLcyeh + kd3qqGElvW/VDL5AaWTg0nLVkjRo9z+40RQzuVaE8AkAFmxZzow3x+VJYKdjykkJ + 0iT9wCS0DRTXu269V264Vf/3jvredZiKRkgwlL9xNAwxXFg0x/XFw005UWVRIkdg + cKWTjpBP2dPwVZ4WWC+9aGVd+Gyn1o0CLelf4rEjGoXbAAEgAqeGUxrcIlbjXfbc + mwIDAQAB + -----END PUBLIC KEY----- + CreateIndex: 416 + ModifyIndex: 487 + Name: test + Type: jwt +operation: + description: The operation performed. 
+ returned: changed + type: str + sample: update +""" + +import re + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.consul import ( + AUTH_ARGUMENTS_SPEC, + _ConsulModule, + camel_case_key, +) + + +def normalize_ttl(ttl): + matches = re.findall(r"(\d+)(:h|m|s)", ttl) + ttl = 0 + for value, unit in matches: + value = int(value) + if unit == "m": + value *= 60 + elif unit == "h": + value *= 60 * 60 + ttl += value + + new_ttl = "" + hours, remainder = divmod(ttl, 3600) + if hours: + new_ttl += "{0}h".format(hours) + minutes, seconds = divmod(remainder, 60) + if minutes: + new_ttl += "{0}m".format(minutes) + if seconds: + new_ttl += "{0}s".format(seconds) + return new_ttl + + +class ConsulAuthMethodModule(_ConsulModule): + api_endpoint = "acl/auth-method" + result_key = "auth_method" + unique_identifiers = ["name"] + + def map_param(self, k, v, is_update): + if k == "config" and v: + v = {camel_case_key(k2): v2 for k2, v2 in v.items()} + return super(ConsulAuthMethodModule, self).map_param(k, v, is_update) + + def needs_update(self, api_obj, module_obj): + if "MaxTokenTTL" in module_obj: + module_obj["MaxTokenTTL"] = normalize_ttl(module_obj["MaxTokenTTL"]) + return super(ConsulAuthMethodModule, self).needs_update(api_obj, module_obj) + + +_ARGUMENT_SPEC = { + "name": dict(type="str", required=True), + "type": dict(type="str", choices=["kubernetes", "jwt", "oidc", "aws-iam"]), + "description": dict(type="str"), + "display_name": dict(type="str"), + "max_token_ttl": dict(type="str", no_log=False), + "token_locality": dict(type="str", choices=["local", "global"]), + "config": dict(type="dict"), + "state": dict(default="present", choices=["present", "absent"]), +} +_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) + + +def main(): + module = AnsibleModule( + _ARGUMENT_SPEC, + supports_check_mode=True, + ) + consul_module = ConsulAuthMethodModule(module) + consul_module.execute() + + +if __name__ == 
"__main__": + main() diff --git a/plugins/modules/consul_binding_rule.py b/plugins/modules/consul_binding_rule.py new file mode 100644 index 0000000000..de1fae9357 --- /dev/null +++ b/plugins/modules/consul_binding_rule.py @@ -0,0 +1,181 @@ +#!/usr/bin/python +# +# Copyright (c) 2024, Florian Apolloner (@apollo13) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: consul_binding_rule +short_description: Manipulate Consul binding rules +version_added: 8.3.0 +description: + - Allows the addition, modification and deletion of binding rules in a Consul cluster using the agent. For more details + on using and configuring binding rules, see U(https://developer.hashicorp.com/consul/api-docs/acl/binding-rules). +author: + - Florian Apolloner (@apollo13) +extends_documentation_fragment: + - community.general.consul + - community.general.consul.actiongroup_consul + - community.general.consul.token + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: partial + details: + - In check mode the diff misses operational attributes. +options: + state: + description: + - Whether the binding rule should be present or absent. + choices: ['present', 'absent'] + default: present + type: str + name: + description: + - Specifies a name for the binding rule. + - 'Note: This is used to identify the binding rule. But since the API does not support a name, it is prefixed to the + description.' + type: str + required: true + description: + description: + - Free form human readable description of the binding rule. + type: str + auth_method: + description: + - The name of the auth method that this rule applies to. 
+ type: str + required: true + selector: + description: + - Specifies the expression used to match this rule against valid identities returned from an auth method validation. + - If empty this binding rule matches all valid identities returned from the auth method. + type: str + bind_type: + description: + - Specifies the way the binding rule affects a token created at login. + type: str + choices: [service, node, role, templated-policy] + bind_name: + description: + - The name to bind to a token at login-time. + - What it binds to can be adjusted with different values of the O(bind_type) parameter. + type: str + bind_vars: + description: + - Specifies the templated policy variables when O(bind_type) is set to V(templated-policy). + type: dict +""" + +EXAMPLES = r""" +- name: Create a binding rule + community.general.consul_binding_rule: + name: my_name + description: example rule + auth_method: minikube + bind_type: service + bind_name: "{{ serviceaccount.name }}" + token: "{{ consul_management_token }}" + +- name: Remove a binding rule + community.general.consul_binding_rule: + name: my_name + auth_method: minikube + state: absent +""" + +RETURN = r""" +binding_rule: + description: The binding rule as returned by the Consul HTTP API. + returned: always + type: dict + sample: + Description: "my_name: example rule" + AuthMethod: minikube + Selector: serviceaccount.namespace==default + BindType: service + BindName: "{{ serviceaccount.name }}" + CreateIndex: 30 + ID: 59c8a237-e481-4239-9202-45f117950c5f + ModifyIndex: 33 +operation: + description: The operation performed. 
+ returned: changed + type: str + sample: update +""" + + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.consul import ( + AUTH_ARGUMENTS_SPEC, + RequestError, + _ConsulModule, +) + + +class ConsulBindingRuleModule(_ConsulModule): + api_endpoint = "acl/binding-rule" + result_key = "binding_rule" + unique_identifiers = ["id"] + + def read_object(self): + url = "acl/binding-rules?authmethod={0}".format(self.params["auth_method"]) + try: + results = self.get(url) + for result in results: + if result.get("Description").startswith( + "{0}: ".format(self.params["name"]) + ): + return result + except RequestError as e: + if e.status == 404: + return + elif e.status == 403 and b"ACL not found" in e.response_data: + return + raise + + def module_to_obj(self, is_update): + obj = super(ConsulBindingRuleModule, self).module_to_obj(is_update) + del obj["Name"] + return obj + + def prepare_object(self, existing, obj): + final = super(ConsulBindingRuleModule, self).prepare_object(existing, obj) + name = self.params["name"] + description = final.pop("Description", "").split(": ", 1)[-1] + final["Description"] = "{0}: {1}".format(name, description) + return final + + +_ARGUMENT_SPEC = { + "name": dict(type="str", required=True), + "description": dict(type="str"), + "auth_method": dict(type="str", required=True), + "selector": dict(type="str"), + "bind_type": dict( + type="str", choices=["service", "node", "role", "templated-policy"] + ), + "bind_name": dict(type="str"), + "bind_vars": dict(type="dict"), + "state": dict(default="present", choices=["present", "absent"]), +} +_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) + + +def main(): + module = AnsibleModule( + _ARGUMENT_SPEC, + supports_check_mode=True, + ) + consul_module = ConsulBindingRuleModule(module) + consul_module.execute() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/consul_kv.py b/plugins/modules/consul_kv.py index 
3419e3322b..d9354e62c5 100644 --- a/plugins/modules/consul_kv.py +++ b/plugins/modules/consul_kv.py @@ -1,113 +1,112 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (c) 2015, Steve Gargan # Copyright (c) 2018 Genome Research Ltd. # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: consul_kv -short_description: Manipulate entries in the key/value store of a consul cluster +short_description: Manipulate entries in the key/value store of a Consul cluster description: - - Allows the retrieval, addition, modification and deletion of key/value entries in a - consul cluster via the agent. The entire contents of the record, including - the indices, flags and session are returned as C(value). - - If the C(key) represents a prefix then note that when a value is removed, the existing - value if any is returned as part of the results. + - Allows the retrieval, addition, modification and deletion of key/value entries in a Consul cluster using the agent. The + entire contents of the record, including the indices, flags and session are returned as C(value). + - If the O(key) represents a prefix then note that when a value is removed, the existing value if any is returned as part + of the results. - See http://www.consul.io/docs/agent/http.html#kv for more details. requirements: - - python-consul + - py-consul - requests author: - Steve Gargan (@sgargan) - Colin Nolan (@colin-nolan) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - The action to take with the supplied key and value. 
If the state is C(present) and I(value) is set, the key - contents will be set to the value supplied and C(changed) will be set to C(true) only if the value was - different to the current contents. If the state is C(present) and I(value) is not set, the existing value - associated to the key will be returned. The state C(absent) will remove the key/value pair, - again C(changed) will be set to true only if the key actually existed - prior to the removal. An attempt can be made to obtain or free the - lock associated with a key/value pair with the states C(acquire) or - C(release) respectively. a valid session must be supplied to make the - attempt changed will be true if the attempt is successful, false - otherwise. - type: str - choices: [ absent, acquire, present, release ] - default: present - key: - description: - - The key at which the value should be stored. - type: str - required: true - value: - description: - - The value should be associated with the given key, required if C(state) - is C(present). - type: str - recurse: - description: - - If the key represents a prefix, each entry with the prefix can be - retrieved by setting this to C(true). - type: bool - retrieve: - description: - - If the I(state) is C(present) and I(value) is set, perform a - read after setting the value and return this value. - default: true - type: bool - session: - description: - - The session that should be used to acquire or release a lock - associated with a key/value pair. - type: str - token: - description: - - The token key identifying an ACL rule set that controls access to - the key value pair - type: str - cas: - description: - - Used when acquiring a lock with a session. If the C(cas) is C(0), then - Consul will only put the key if it does not already exist. If the - C(cas) value is non-zero, then the key is only set if the index matches - the ModifyIndex of that key. 
- type: str - flags: - description: - - Opaque positive integer value that can be passed when setting a value. - type: str - host: - description: - - Host of the consul agent. - type: str - default: localhost - port: - description: - - The port on which the consul agent is running. - type: int - default: 8500 - scheme: - description: - - The protocol scheme on which the consul agent is running. - type: str - default: http - validate_certs: - description: - - Whether to verify the tls certificate of the consul agent. - type: bool - default: true -''' + state: + description: + - The action to take with the supplied key and value. If the state is V(present) and O(value) is set, the key contents + is set to the value supplied and C(changed) is set to V(true) only if the value was different to the current contents. + If the state is V(present) and O(value) is not set, the existing value associated to the key is returned. The state + V(absent) is used to remove the key/value pair, again C(changed) is set to V(true) only if the key actually existed + prior to the removal. An attempt can be made to obtain or free the lock associated with a key/value pair with the + states V(acquire) or V(release) respectively. A valid session must be supplied to make the attempt C(changed) is V(true) + if the attempt is successful, V(false) otherwise. + type: str + choices: [absent, acquire, present, release] + default: present + key: + description: + - The key at which the value should be stored. + type: str + required: true + value: + description: + - The value should be associated with the given key, required if O(state) is V(present). + type: str + recurse: + description: + - If the key represents a prefix, each entry with the prefix can be retrieved by setting this to V(true). + type: bool + retrieve: + description: + - If the O(state) is V(present) and O(value) is set, perform a read after setting the value and return this value. 
+ default: true + type: bool + session: + description: + - The session that should be used to acquire or release a lock associated with a key/value pair. + type: str + token: + description: + - The token key identifying an ACL rule set that controls access to the key value pair. + type: str + cas: + description: + - Used when acquiring a lock with a session. If the O(cas) is V(0), then Consul only puts the key if it does not already + exist. If the O(cas) value is non-zero, then the key is only set if the index matches the ModifyIndex of that key. + type: str + flags: + description: + - Opaque positive integer value that can be passed when setting a value. + type: str + host: + description: + - Host of the Consul agent. + type: str + default: localhost + port: + description: + - The port on which the Consul agent is running. + type: int + default: 8500 + scheme: + description: + - The protocol scheme on which the Consul agent is running. + type: str + default: http + validate_certs: + description: + - Whether to verify the tls certificate of the Consul agent. + type: bool + default: true + datacenter: + description: + - The name of the datacenter to query. If unspecified, the query defaults to the datacenter of the Consul agent on O(host). 
+ type: str + version_added: 10.0.0 +""" -EXAMPLES = ''' +EXAMPLES = r""" # If the key does not exist, the value associated to the "data" property in `retrieved_key` will be `None` # If the key value is empty string, `retrieved_key["data"]["Value"]` will be `None` - name: Retrieve a value from the key/value store @@ -125,7 +124,7 @@ EXAMPLES = ''' key: somekey state: absent -- name: Add a node to an arbitrary group via consul inventory (see consul.ini) +- name: Add a node to an arbitrary group using Consul inventory (see consul.ini) community.general.consul_kv: key: ansible/groups/dc1/somenode value: top_secret @@ -136,7 +135,7 @@ EXAMPLES = ''' value: 20160509 session: "{{ sessionid }}" state: acquire -''' +""" from ansible.module_utils.common.text.converters import to_text @@ -149,8 +148,8 @@ except ImportError: from ansible.module_utils.basic import AnsibleModule -# Note: although the python-consul documentation implies that using a key with a value of `None` with `put` has a -# special meaning (https://python-consul.readthedocs.io/en/latest/#consul-kv), if not set in the subsequently API call, +# Note: although the py-consul implementation implies that using a key with a value of `None` with `put` has a special +# meaning (https://github.com/criteo/py-consul/blob/master/consul/api/kv.py), if not set in the subsequently API call, # the value just defaults to an empty string (https://www.consul.io/api/kv.html#create-update-key) NOT_SET = None @@ -284,7 +283,8 @@ def get_consul_api(module): port=module.params.get('port'), scheme=module.params.get('scheme'), verify=module.params.get('validate_certs'), - token=module.params.get('token')) + token=module.params.get('token'), + dc=module.params.get('datacenter')) def test_dependencies(module): @@ -298,6 +298,7 @@ def main(): module = AnsibleModule( argument_spec=dict( cas=dict(type='str'), + datacenter=dict(type='str'), flags=dict(type='str'), key=dict(type='str', required=True, no_log=False), host=dict(type='str', 
default='localhost'), diff --git a/plugins/modules/consul_policy.py b/plugins/modules/consul_policy.py new file mode 100644 index 0000000000..95d2ac48d0 --- /dev/null +++ b/plugins/modules/consul_policy.py @@ -0,0 +1,162 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Håkon Lerring +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: consul_policy +short_description: Manipulate Consul policies +version_added: 7.2.0 +description: + - Allows the addition, modification and deletion of policies in a Consul cluster using the agent. For more details on using + and configuring ACLs, see U(https://www.consul.io/docs/guides/acl.html). +author: + - Håkon Lerring (@Hakon) +extends_documentation_fragment: + - community.general.consul + - community.general.consul.actiongroup_consul + - community.general.consul.token + - community.general.attributes +attributes: + check_mode: + support: full + version_added: 8.3.0 + diff_mode: + support: partial + version_added: 8.3.0 + details: + - In check mode the diff misses operational attributes. + action_group: + version_added: 8.3.0 +options: + state: + description: + - Whether the policy should be present or absent. + choices: ['present', 'absent'] + default: present + type: str + valid_datacenters: + description: + - Valid datacenters for the policy. All if list is empty. + type: list + elements: str + name: + description: + - The name that should be associated with the policy, this is opaque to Consul. + required: true + type: str + description: + description: + - Description of the policy. + type: str + rules: + type: str + description: + - Rule document that should be associated with the current policy. 
+""" + +EXAMPLES = r""" +- name: Create a policy with rules + community.general.consul_policy: + host: consul1.example.com + token: some_management_acl + name: foo-access + rules: | + key "foo" { + policy = "read" + } + key "private/foo" { + policy = "deny" + } + +- name: Update the rules associated to a policy + community.general.consul_policy: + host: consul1.example.com + token: some_management_acl + name: foo-access + rules: | + key "foo" { + policy = "read" + } + key "private/foo" { + policy = "deny" + } + event "bbq" { + policy = "write" + } + +- name: Remove a policy + community.general.consul_policy: + host: consul1.example.com + token: some_management_acl + name: foo-access + state: absent +""" + +RETURN = r""" +policy: + description: The policy as returned by the Consul HTTP API. + returned: always + type: dict + sample: + CreateIndex: 632 + Description: Testing + Hash: rj5PeDHddHslkpW7Ij4OD6N4bbSXiecXFmiw2SYXg2A= + Name: foo-access + Rules: |- + key "foo" { + policy = "read" + } + key "private/foo" { + policy = "deny" + } +operation: + description: The operation performed. 
+ returned: changed + type: str + sample: update +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.consul import ( + AUTH_ARGUMENTS_SPEC, + OPERATION_READ, + _ConsulModule, +) + +_ARGUMENT_SPEC = { + "name": dict(required=True), + "description": dict(type="str"), + "rules": dict(type="str"), + "valid_datacenters": dict(type="list", elements="str"), + "state": dict(default="present", choices=["present", "absent"]), +} +_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) + + +class ConsulPolicyModule(_ConsulModule): + api_endpoint = "acl/policy" + result_key = "policy" + unique_identifiers = ["id"] + + def endpoint_url(self, operation, identifier=None): + if operation == OPERATION_READ: + return [self.api_endpoint, "name", self.params["name"]] + return super(ConsulPolicyModule, self).endpoint_url(operation, identifier) + + +def main(): + module = AnsibleModule( + _ARGUMENT_SPEC, + supports_check_mode=True, + ) + consul_module = ConsulPolicyModule(module) + consul_module.execute() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/consul_role.py b/plugins/modules/consul_role.py new file mode 100644 index 0000000000..968de022a2 --- /dev/null +++ b/plugins/modules/consul_role.py @@ -0,0 +1,283 @@ +#!/usr/bin/python + +# Copyright (c) 2022, Håkon Lerring +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: consul_role +short_description: Manipulate Consul roles +version_added: 7.5.0 +description: + - Allows the addition, modification and deletion of roles in a Consul cluster using the agent. For more details on using + and configuring ACLs, see U(https://www.consul.io/docs/guides/acl.html). 
+author: + - Håkon Lerring (@Hakon) +extends_documentation_fragment: + - community.general.consul + - community.general.consul.token + - community.general.consul.actiongroup_consul + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: partial + details: + - In check mode the diff misses operational attributes. + version_added: 8.3.0 + action_group: + version_added: 8.3.0 +options: + name: + description: + - A name used to identify the role. + required: true + type: str + state: + description: + - Whether the role should be present or absent. + choices: ['present', 'absent'] + default: present + type: str + description: + description: + - Description of the role. + - If not specified, the assigned description is not changed. + type: str + policies: + type: list + elements: dict + description: + - List of policies to attach to the role. Each policy is a dict. + - If the parameter is left blank, any policies currently assigned are not changed. + - Any empty array (V([])) clears any policies previously set. + suboptions: + name: + description: + - The name of the policy to attach to this role; see M(community.general.consul_policy) for more info. + - Either this or O(policies[].id) must be specified. + type: str + id: + description: + - The ID of the policy to attach to this role; see M(community.general.consul_policy) for more info. + - Either this or O(policies[].name) must be specified. + type: str + templated_policies: + description: + - The list of templated policies that should be applied to the role. + type: list + elements: dict + version_added: 8.3.0 + suboptions: + template_name: + description: + - The templated policy name. + type: str + required: true + template_variables: + description: + - The templated policy variables. + - Not all templated policies require variables. + type: dict + service_identities: + type: list + elements: dict + description: + - List of service identities to attach to the role. 
+ - If not specified, any service identities currently assigned are not changed. + - If the parameter is an empty array (V([])), any node identities assigned are unassigned. + suboptions: + service_name: + description: + - The name of the node. + - Must not be longer than 256 characters, must start and end with a lowercase alphanumeric character. + - May only contain lowercase alphanumeric characters as well as V(-) and V(_). + - This suboption has been renamed from O(service_identities[].name) to O(service_identities[].service_name) in community.general + 8.3.0. The old name can still be used. + type: str + required: true + aliases: + - name + datacenters: + description: + - The datacenters where the policies are effective. + - This results in effective policy only being valid in this datacenter. + - If an empty array (V([])) is specified, the policies are valid in all datacenters. + - Including those which do not yet exist but may in the future. + type: list + elements: str + node_identities: + type: list + elements: dict + description: + - List of node identities to attach to the role. + - If not specified, any node identities currently assigned are not changed. + - If the parameter is an empty array (V([])), any node identities assigned are unassigned. + suboptions: + node_name: + description: + - The name of the node. + - Must not be longer than 256 characters, must start and end with a lowercase alphanumeric character. + - May only contain lowercase alphanumeric characters as well as V(-) and V(_). + - This suboption has been renamed from O(node_identities[].name) to O(node_identities[].node_name) in community.general + 8.3.0. The old name can still be used. + type: str + required: true + aliases: + - name + datacenter: + description: + - The nodes datacenter. + - This results in effective policy only being valid in this datacenter. 
+ type: str + required: true +""" + +EXAMPLES = r""" +- name: Create a role with 2 policies + community.general.consul_role: + host: consul1.example.com + token: some_management_acl + name: foo-role + policies: + - id: 783beef3-783f-f41f-7422-7087dc272765 + - name: "policy-1" + +- name: Create a role with service identity + community.general.consul_role: + host: consul1.example.com + token: some_management_acl + name: foo-role-2 + service_identities: + - name: web + datacenters: + - dc1 + +- name: Create a role with node identity + community.general.consul_role: + host: consul1.example.com + token: some_management_acl + name: foo-role-3 + node_identities: + - name: node-1 + datacenter: dc2 + +- name: Remove a role + community.general.consul_role: + host: consul1.example.com + token: some_management_acl + name: foo-role-3 + state: absent +""" + +RETURN = r""" +role: + description: The role object. + returned: success + type: dict + sample: + { + "CreateIndex": 39, + "Description": "", + "Hash": "Trt0QJtxVEfvTTIcdTUbIJRr6Dsi6E4EcwSFxx9tCYM=", + "ID": "9a300b8d-48db-b720-8544-a37c0f5dafb5", + "ModifyIndex": 39, + "Name": "foo-role", + "Policies": [ + { + "ID": "b1a00172-d7a1-0e66-a12e-7a4045c4b774", + "Name": "foo-access" + } + ] + } +operation: + description: The operation performed on the role. 
+ returned: changed + type: str + sample: update +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.consul import ( + AUTH_ARGUMENTS_SPEC, + OPERATION_READ, + _ConsulModule, +) + + +class ConsulRoleModule(_ConsulModule): + api_endpoint = "acl/role" + result_key = "role" + unique_identifiers = ["id"] + + def endpoint_url(self, operation, identifier=None): + if operation == OPERATION_READ: + return [self.api_endpoint, "name", self.params["name"]] + return super(ConsulRoleModule, self).endpoint_url(operation, identifier) + + +NAME_ID_SPEC = dict( + name=dict(type="str"), + id=dict(type="str"), +) + +NODE_ID_SPEC = dict( + node_name=dict(type="str", required=True, aliases=["name"]), + datacenter=dict(type="str", required=True), +) + +SERVICE_ID_SPEC = dict( + service_name=dict(type="str", required=True, aliases=["name"]), + datacenters=dict(type="list", elements="str"), +) + +TEMPLATE_POLICY_SPEC = dict( + template_name=dict(type="str", required=True), + template_variables=dict(type="dict"), +) + +_ARGUMENT_SPEC = { + "name": dict(type="str", required=True), + "description": dict(type="str"), + "policies": dict( + type="list", + elements="dict", + options=NAME_ID_SPEC, + mutually_exclusive=[("name", "id")], + required_one_of=[("name", "id")], + ), + "templated_policies": dict( + type="list", + elements="dict", + options=TEMPLATE_POLICY_SPEC, + ), + "node_identities": dict( + type="list", + elements="dict", + options=NODE_ID_SPEC, + ), + "service_identities": dict( + type="list", + elements="dict", + options=SERVICE_ID_SPEC, + ), + "state": dict(default="present", choices=["present", "absent"]), +} +_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) + + +def main(): + module = AnsibleModule( + _ARGUMENT_SPEC, + supports_check_mode=True, + ) + consul_module = ConsulRoleModule(module) + consul_module.execute() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/consul_session.py 
b/plugins/modules/consul_session.py index 062eb3befe..acfb8e5504 100644 --- a/plugins/modules/consul_session.py +++ b/plugins/modules/consul_session.py @@ -1,116 +1,90 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2015, Steve Gargan # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: consul_session -short_description: Manipulate consul sessions +short_description: Manipulate Consul sessions description: - - Allows the addition, modification and deletion of sessions in a consul - cluster. These sessions can then be used in conjunction with key value pairs - to implement distributed locks. In depth documentation for working with - sessions can be found at http://www.consul.io/docs/internals/sessions.html -requirements: - - python-consul - - requests + - Allows the addition, modification and deletion of sessions in a Consul cluster. These sessions can then be used in conjunction + with key value pairs to implement distributed locks. In depth documentation for working with sessions can be found at + U(http://www.consul.io/docs/internals/sessions.html). author: -- Steve Gargan (@sgargan) + - Steve Gargan (@sgargan) + - Håkon Lerring (@Hakon) +extends_documentation_fragment: + - community.general.consul + - community.general.consul.actiongroup_consul + - community.general.consul.token + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none + action_group: + version_added: 8.3.0 options: - id: - description: - - ID of the session, required when I(state) is either C(info) or - C(remove). - type: str - state: - description: - - Whether the session should be present i.e. created if it doesn't - exist, or absent, removed if present. 
If created, the I(id) for the - session is returned in the output. If C(absent), I(id) is - required to remove the session. Info for a single session, all the - sessions for a node or all available sessions can be retrieved by - specifying C(info), C(node) or C(list) for the I(state); for C(node) - or C(info), the node I(name) or session I(id) is required as parameter. - choices: [ absent, info, list, node, present ] - type: str - default: present - name: - description: - - The name that should be associated with the session. Required when - I(state=node) is used. - type: str - delay: - description: - - The optional lock delay that can be attached to the session when it - is created. Locks for invalidated sessions ar blocked from being - acquired until this delay has expired. Durations are in seconds. - type: int - default: 15 - node: - description: - - The name of the node that with which the session will be associated. - by default this is the name of the agent. - type: str - datacenter: - description: - - The name of the datacenter in which the session exists or should be - created. - type: str - checks: - description: - - Checks that will be used to verify the session health. If - all the checks fail, the session will be invalidated and any locks - associated with the session will be release and can be acquired once - the associated lock delay has expired. - type: list - elements: str - host: - description: - - The host of the consul agent defaults to localhost. - type: str - default: localhost - port: - description: - - The port on which the consul agent is running. - type: int - default: 8500 - scheme: - description: - - The protocol scheme on which the consul agent is running. - type: str - default: http - validate_certs: - description: - - Whether to verify the TLS certificate of the consul agent. - type: bool - default: true - behavior: - description: - - The optional behavior that can be attached to the session when it - is created. 
This controls the behavior when a session is invalidated. - choices: [ delete, release ] - type: str - default: release - ttl: - description: - - Specifies the duration of a session in seconds (between 10 and 86400). - type: int - version_added: 5.4.0 - token: - description: - - The token key identifying an ACL rule set that controls access to - the key value pair. - type: str - version_added: 5.6.0 -''' + id: + description: + - ID of the session, required when O(state) is either V(info) or V(remove). + type: str + state: + description: + - Whether the session should be present, in other words it should be created if it does not exist, or absent, removed + if present. If created, the O(id) for the session is returned in the output. If V(absent), O(id) is required to remove + the session. Info for a single session, all the sessions for a node or all available sessions can be retrieved by + specifying V(info), V(node) or V(list) for the O(state); for V(node) or V(info), the node O(name) or session O(id) + is required as parameter. + choices: [absent, info, list, node, present] + type: str + default: present + name: + description: + - The name that should be associated with the session. Required when O(state=node) is used. + type: str + delay: + description: + - The optional lock delay that can be attached to the session when it is created. Locks for invalidated sessions ar + blocked from being acquired until this delay has expired. Durations are in seconds. + type: int + default: 15 + node: + description: + - The name of the node that with which the session is associated. By default this is the name of the agent. + type: str + datacenter: + description: + - The name of the datacenter in which the session exists or should be created. + type: str + checks: + description: + - Checks that are used to verify the session health. 
If all the checks fail, the session is invalidated and any locks + associated with the session are released and can be acquired once the associated lock delay has expired. + type: list + elements: str + behavior: + description: + - The optional behavior that can be attached to the session when it is created. This controls the behavior when a session + is invalidated. + choices: [delete, release] + type: str + default: release + ttl: + description: + - Specifies the duration of a session in seconds (between 10 and 86400). + type: int + version_added: 5.4.0 + token: + version_added: 5.6.0 +""" -EXAMPLES = ''' -- name: Register basic session with consul +EXAMPLES = r""" +- name: Register basic session with Consul community.general.consul_session: name: session1 @@ -137,40 +111,53 @@ EXAMPLES = ''' - name: Register session with a ttl community.general.consul_session: name: session-with-ttl - ttl: 600 # sec -''' - -try: - import consul - from requests.exceptions import ConnectionError - python_consul_installed = True -except ImportError: - python_consul_installed = False + ttl: 600 # sec +""" from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.consul import ( + AUTH_ARGUMENTS_SPEC, _ConsulModule +) -def execute(module): +def execute(module, consul_module): state = module.params.get('state') if state in ['info', 'list', 'node']: - lookup_sessions(module) + lookup_sessions(module, consul_module) elif state == 'present': - update_session(module) + update_session(module, consul_module) else: - remove_session(module) + remove_session(module, consul_module) -def lookup_sessions(module): +def list_sessions(consul_module, datacenter): + return consul_module.get( + 'session/list', + params={'dc': datacenter}) + + +def list_sessions_for_node(consul_module, node, datacenter): + return consul_module.get( + ('session', 'node', node), + params={'dc': datacenter}) + + +def get_session_info(consul_module, session_id, 
datacenter): + return consul_module.get( + ('session', 'info', session_id), + params={'dc': datacenter}) + + +def lookup_sessions(module, consul_module): datacenter = module.params.get('datacenter') state = module.params.get('state') - consul_client = get_consul_api(module) try: if state == 'list': - sessions_list = consul_client.session.list(dc=datacenter) + sessions_list = list_sessions(consul_module, datacenter) # Ditch the index, this can be grabbed from the results if sessions_list and len(sessions_list) >= 2: sessions_list = sessions_list[1] @@ -178,14 +165,14 @@ def lookup_sessions(module): sessions=sessions_list) elif state == 'node': node = module.params.get('node') - sessions = consul_client.session.node(node, dc=datacenter) + sessions = list_sessions_for_node(consul_module, node, datacenter) module.exit_json(changed=True, node=node, sessions=sessions) elif state == 'info': session_id = module.params.get('id') - session_by_id = consul_client.session.info(session_id, dc=datacenter) + session_by_id = get_session_info(consul_module, session_id, datacenter) module.exit_json(changed=True, session_id=session_id, sessions=session_by_id) @@ -194,7 +181,26 @@ def lookup_sessions(module): module.fail_json(msg="Could not retrieve session info %s" % e) -def update_session(module): +def create_session(consul_module, name, behavior, ttl, node, + lock_delay, datacenter, checks): + create_data = { + "LockDelay": lock_delay, + "Node": node, + "Name": name, + "Checks": checks, + "Behavior": behavior, + } + if ttl is not None: + create_data["TTL"] = "%ss" % str(ttl) # TTL is in seconds + create_session_response_dict = consul_module.put( + 'session/create', + params={ + 'dc': datacenter}, + data=create_data) + return create_session_response_dict["ID"] + + +def update_session(module, consul_module): name = module.params.get('name') delay = module.params.get('delay') @@ -204,18 +210,16 @@ def update_session(module): behavior = module.params.get('behavior') ttl = 
module.params.get('ttl') - consul_client = get_consul_api(module) - try: - session = consul_client.session.create( - name=name, - behavior=behavior, - ttl=ttl, - node=node, - lock_delay=delay, - dc=datacenter, - checks=checks - ) + session = create_session(consul_module, + name=name, + behavior=behavior, + ttl=ttl, + node=node, + lock_delay=delay, + datacenter=datacenter, + checks=checks + ) module.exit_json(changed=True, session_id=session, name=name, @@ -228,13 +232,15 @@ def update_session(module): module.fail_json(msg="Could not create/update session %s" % e) -def remove_session(module): +def destroy_session(consul_module, session_id): + return consul_module.put(('session', 'destroy', session_id)) + + +def remove_session(module, consul_module): session_id = module.params.get('id') - consul_client = get_consul_api(module) - try: - consul_client.session.destroy(session_id) + destroy_session(consul_module, session_id) module.exit_json(changed=True, session_id=session_id) @@ -243,36 +249,31 @@ def remove_session(module): session_id, e)) -def get_consul_api(module): - return consul.Consul(host=module.params.get('host'), - port=module.params.get('port'), - scheme=module.params.get('scheme'), - verify=module.params.get('validate_certs'), - token=module.params.get('token')) - - -def test_dependencies(module): - if not python_consul_installed: - module.fail_json(msg="python-consul required for this module. 
" - "see https://python-consul.readthedocs.io/en/latest/#installation") - - def main(): argument_spec = dict( checks=dict(type='list', elements='str'), delay=dict(type='int', default='15'), - behavior=dict(type='str', default='release', choices=['release', 'delete']), + behavior=dict( + type='str', + default='release', + choices=[ + 'release', + 'delete']), ttl=dict(type='int'), - host=dict(type='str', default='localhost'), - port=dict(type='int', default=8500), - scheme=dict(type='str', default='http'), - validate_certs=dict(type='bool', default=True), id=dict(type='str'), name=dict(type='str'), node=dict(type='str'), - state=dict(type='str', default='present', choices=['absent', 'info', 'list', 'node', 'present']), + state=dict( + type='str', + default='present', + choices=[ + 'absent', + 'info', + 'list', + 'node', + 'present']), datacenter=dict(type='str'), - token=dict(type='str', no_log=True), + **AUTH_ARGUMENTS_SPEC ) module = AnsibleModule( @@ -284,14 +285,10 @@ def main(): ], supports_check_mode=False ) - - test_dependencies(module) + consul_module = _ConsulModule(module) try: - execute(module) - except ConnectionError as e: - module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % ( - module.params.get('host'), module.params.get('port'), e)) + execute(module, consul_module) except Exception as e: module.fail_json(msg=str(e)) diff --git a/plugins/modules/consul_token.py b/plugins/modules/consul_token.py new file mode 100644 index 0000000000..cbe49ee2af --- /dev/null +++ b/plugins/modules/consul_token.py @@ -0,0 +1,326 @@ +#!/usr/bin/python +# +# Copyright (c) 2024, Florian Apolloner (@apollo13) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: consul_token +short_description: Manipulate Consul tokens +version_added: 8.3.0 +description: + - Allows the 
addition, modification and deletion of tokens in a Consul cluster using the agent. For more details on using + and configuring ACLs, see U(https://www.consul.io/docs/guides/acl.html). +author: + - Florian Apolloner (@apollo13) +extends_documentation_fragment: + - community.general.consul + - community.general.consul.token + - community.general.consul.actiongroup_consul + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: partial + details: + - In check mode the diff misses operational attributes. + action_group: + version_added: 8.3.0 +options: + state: + description: + - Whether the token should be present or absent. + choices: ['present', 'absent'] + default: present + type: str + accessor_id: + description: + - Specifies a UUID to use as the token's Accessor ID. If not specified a UUID is generated for this field. + type: str + secret_id: + description: + - Specifies a UUID to use as the token's Secret ID. If not specified a UUID is generated for this field. + type: str + description: + description: + - Free form human readable description of the token. + type: str + policies: + type: list + elements: dict + description: + - List of policies to attach to the token. Each policy is a dict. + - If the parameter is left blank, any policies currently assigned are not changed. + - Any empty array (V([])) clears any policies previously set. + suboptions: + name: + description: + - The name of the policy to attach to this token; see M(community.general.consul_policy) for more info. + - Either this or O(policies[].id) must be specified. + type: str + id: + description: + - The ID of the policy to attach to this token; see M(community.general.consul_policy) for more info. + - Either this or O(policies[].name) must be specified. + type: str + roles: + type: list + elements: dict + description: + - List of roles to attach to the token. Each role is a dict. 
+ - If the parameter is left blank, any roles currently assigned are not changed. + - Any empty array (V([])) clears any roles previously set. + suboptions: + name: + description: + - The name of the role to attach to this token; see M(community.general.consul_role) for more info. + - Either this or O(roles[].id) must be specified. + type: str + id: + description: + - The ID of the role to attach to this token; see M(community.general.consul_role) for more info. + - Either this or O(roles[].name) must be specified. + type: str + templated_policies: + description: + - The list of templated policies that should be applied to the role. + type: list + elements: dict + suboptions: + template_name: + description: + - The templated policy name. + type: str + required: true + template_variables: + description: + - The templated policy variables. + - Not all templated policies require variables. + type: dict + service_identities: + type: list + elements: dict + description: + - List of service identities to attach to the token. + - If not specified, any service identities currently assigned are not changed. + - If the parameter is an empty array (V([])), any node identities assigned are unassigned. + suboptions: + service_name: + description: + - The name of the service. + - Must not be longer than 256 characters, must start and end with a lowercase alphanumeric character. + - May only contain lowercase alphanumeric characters as well as V(-) and V(_). + type: str + required: true + datacenters: + description: + - The datacenters where the token is effective. + - If an empty array (V([])) is specified, the token is valid in all datacenters. + - Including those which do not yet exist but may in the future. + type: list + elements: str + node_identities: + type: list + elements: dict + description: + - List of node identities to attach to the token. + - If not specified, any node identities currently assigned are not changed. 
+ - If the parameter is an empty array (V([])), any node identities assigned are unassigned. + suboptions: + node_name: + description: + - The name of the node. + - Must not be longer than 256 characters, must start and end with a lowercase alphanumeric character. + - May only contain lowercase alphanumeric characters as well as V(-) and V(_). + type: str + required: true + datacenter: + description: + - The nodes datacenter. + - This results in effective token only being valid in this datacenter. + type: str + required: true + local: + description: + - If true, indicates that the token should not be replicated globally and instead be local to the current datacenter. + type: bool + expiration_ttl: + description: + - This is a convenience field and if set it initializes the C(expiration_time). Can be specified in the form of V(60s) + or V(5m) (that is, 60 seconds or 5 minutes, respectively). Ingored when the token is updated! + type: str +""" + +EXAMPLES = r""" +- name: Create / Update a token by accessor_id + community.general.consul_token: + state: present + accessor_id: 07a7de84-c9c7-448a-99cc-beaf682efd21 + token: 8adddd91-0bd6-d41d-ae1a-3b49cfa9a0e8 + roles: + - name: role1 + - name: role2 + service_identities: + - service_name: service1 + datacenters: [dc1, dc2] + node_identities: + - node_name: node1 + datacenter: dc1 + expiration_ttl: 50m + +- name: Delete a token + community.general.consul_token: + state: absent + accessor_id: 07a7de84-c9c7-448a-99cc-beaf682efd21 + token: 8adddd91-0bd6-d41d-ae1a-3b49cfa9a0e8 +""" + +RETURN = r""" +token: + description: The token as returned by the Consul HTTP API. 
+ returned: always + type: dict + sample: + AccessorID: 07a7de84-c9c7-448a-99cc-beaf682efd21 + CreateIndex: 632 + CreateTime: "2024-01-14T21:53:01.402749174+01:00" + Description: Testing + Hash: rj5PeDHddHslkpW7Ij4OD6N4bbSXiecXFmiw2SYXg2A= + Local: false + ModifyIndex: 633 + SecretID: bd380fba-da17-7cee-8576-8d6427c6c930 + ServiceIdentities: ["ServiceName": "test"] +operation: + description: The operation performed. + returned: changed + type: str + sample: update +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.consul import ( + AUTH_ARGUMENTS_SPEC, + _ConsulModule, +) + + +def normalize_link_obj(api_obj, module_obj, key): + api_objs = api_obj.get(key) + module_objs = module_obj.get(key) + if api_objs is None or module_objs is None: + return + name_to_id = {i["Name"]: i["ID"] for i in api_objs} + id_to_name = {i["ID"]: i["Name"] for i in api_objs} + + for obj in module_objs: + identifier = obj.get("ID") + name = obj.get("Name") + if identifier and not name and identifier in id_to_name: + obj["Name"] = id_to_name[identifier] + if not identifier and name and name in name_to_id: + obj["ID"] = name_to_id[name] + + +class ConsulTokenModule(_ConsulModule): + api_endpoint = "acl/token" + result_key = "token" + unique_identifiers = ["accessor_id"] + + create_only_fields = {"expiration_ttl"} + + def read_object(self): + # if `accessor_id` is not supplied we can only create objects and are not idempotent + if not self.id_from_obj(self.params): + return None + return super(ConsulTokenModule, self).read_object() + + def needs_update(self, api_obj, module_obj): + # SecretID is usually not supplied + if "SecretID" not in module_obj and "SecretID" in api_obj: + del api_obj["SecretID"] + normalize_link_obj(api_obj, module_obj, "Roles") + normalize_link_obj(api_obj, module_obj, "Policies") + # ExpirationTTL is only supported on create, not for update + # it writes to ExpirationTime, so we need to remove 
that as well + if "ExpirationTTL" in module_obj: + del module_obj["ExpirationTTL"] + return super(ConsulTokenModule, self).needs_update(api_obj, module_obj) + + +NAME_ID_SPEC = dict( + name=dict(type="str"), + id=dict(type="str"), +) + +NODE_ID_SPEC = dict( + node_name=dict(type="str", required=True), + datacenter=dict(type="str", required=True), +) + +SERVICE_ID_SPEC = dict( + service_name=dict(type="str", required=True), + datacenters=dict(type="list", elements="str"), +) + +TEMPLATE_POLICY_SPEC = dict( + template_name=dict(type="str", required=True), + template_variables=dict(type="dict"), +) + + +_ARGUMENT_SPEC = { + "description": dict(), + "accessor_id": dict(), + "secret_id": dict(no_log=True), + "roles": dict( + type="list", + elements="dict", + options=NAME_ID_SPEC, + mutually_exclusive=[("name", "id")], + required_one_of=[("name", "id")], + ), + "policies": dict( + type="list", + elements="dict", + options=NAME_ID_SPEC, + mutually_exclusive=[("name", "id")], + required_one_of=[("name", "id")], + ), + "templated_policies": dict( + type="list", + elements="dict", + options=TEMPLATE_POLICY_SPEC, + ), + "node_identities": dict( + type="list", + elements="dict", + options=NODE_ID_SPEC, + ), + "service_identities": dict( + type="list", + elements="dict", + options=SERVICE_ID_SPEC, + ), + "local": dict(type="bool"), + "expiration_ttl": dict(type="str"), + "state": dict(default="present", choices=["present", "absent"]), +} +_ARGUMENT_SPEC.update(AUTH_ARGUMENTS_SPEC) + + +def main(): + module = AnsibleModule( + _ARGUMENT_SPEC, + required_if=[("state", "absent", ["accessor_id"])], + supports_check_mode=True, + ) + consul_module = ConsulTokenModule(module) + consul_module.execute() + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/copr.py b/plugins/modules/copr.py index 68738a6c98..4d627ceb8f 100644 --- a/plugins/modules/copr.py +++ b/plugins/modules/copr.py @@ -1,50 +1,66 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2020, Silvie 
Chlupova # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations DOCUMENTATION = r""" ---- module: copr short_description: Manage one of the Copr repositories version_added: 2.0.0 description: This module can enable, disable or remove the specified repository. author: Silvie Chlupova (@schlupov) requirements: - - dnf - - dnf-plugins-core + - dnf + - dnf-plugins-core notes: - - Supports C(check_mode). + - Supports C(check_mode). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - host: - description: The Copr host to work with. - default: copr.fedorainfracloud.org - type: str - protocol: - description: This indicate which protocol to use with the host. - default: https - type: str - name: - description: Copr directory name, for example C(@copr/copr-dev). - required: true - type: str - state: - description: - - Whether to set this project as C(enabled), C(disabled) or C(absent). - default: enabled - type: str - choices: [absent, enabled, disabled] - chroot: - description: - - The name of the chroot that you want to enable/disable/remove in the project, - for example C(epel-7-x86_64). Default chroot is determined by the operating system, - version of the operating system, and architecture on which the module is run. - type: str + host: + description: The Copr host to work with. + default: copr.fedorainfracloud.org + type: str + protocol: + description: This indicate which protocol to use with the host. + default: https + type: str + name: + description: Copr directory name, for example C(@copr/copr-dev). + required: true + type: str + state: + description: + - Whether to set this project as V(enabled), V(disabled), or V(absent). 
+ default: enabled + type: str + choices: [absent, enabled, disabled] + chroot: + description: + - The name of the chroot that you want to enable/disable/remove in the project, for example V(epel-7-x86_64). Default + chroot is determined by the operating system, version of the operating system, and architecture on which the module + is run. + type: str + includepkgs: + description: List of packages to include. + required: false + type: list + elements: str + version_added: 9.4.0 + excludepkgs: + description: List of packages to exclude. + required: false + type: list + elements: str + version_added: 9.4.0 """ EXAMPLES = r""" @@ -59,6 +75,13 @@ EXAMPLES = r""" community.general.copr: state: absent name: '@copr/integration_tests' + +- name: Install Caddy + community.general.copr: + name: '@caddy/caddy' + chroot: fedora-rawhide-{{ ansible_facts.architecture }} + includepkgs: + - caddy """ RETURN = r""" @@ -78,6 +101,7 @@ repo: import stat import os import traceback +from urllib.error import HTTPError try: import dnf @@ -90,11 +114,24 @@ except ImportError: DNF_IMP_ERR = traceback.format_exc() HAS_DNF_PACKAGES = False -from ansible.module_utils.six.moves.urllib.error import HTTPError +from ansible.module_utils.common import respawn from ansible.module_utils.basic import missing_required_lib -from ansible.module_utils import distro # pylint: disable=import-error -from ansible.module_utils.basic import AnsibleModule # pylint: disable=import-error -from ansible.module_utils.urls import open_url # pylint: disable=import-error +from ansible.module_utils import distro +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.urls import open_url + + +def _respawn_dnf(): + if respawn.has_respawned(): + return + system_interpreters = ( + "/usr/libexec/platform-python", + "/usr/bin/python3", + "/usr/bin/python", + ) + interpreter = respawn.probe_interpreters_for_module(system_interpreters, "dnf") + if interpreter: + respawn.respawn_module(interpreter) 
class CoprModule(object): @@ -233,6 +270,12 @@ class CoprModule(object): """ if not repo_content: repo_content = self._download_repo_info() + if self.ansible_module.params["includepkgs"]: + includepkgs_value = ','.join(self.ansible_module.params['includepkgs']) + repo_content = repo_content.rstrip('\n') + '\nincludepkgs={0}\n'.format(includepkgs_value) + if self.ansible_module.params["excludepkgs"]: + excludepkgs_value = ','.join(self.ansible_module.params['excludepkgs']) + repo_content = repo_content.rstrip('\n') + '\nexcludepkgs={0}\n'.format(excludepkgs_value) if self._compare_repo_content(repo_filename_path, repo_content): return False if not self.check_mode: @@ -448,11 +491,14 @@ def run_module(): name=dict(type="str", required=True), state=dict(type="str", choices=["enabled", "disabled", "absent"], default="enabled"), chroot=dict(type="str"), + includepkgs=dict(type='list', elements="str"), + excludepkgs=dict(type='list', elements="str"), ) module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) params = module.params if not HAS_DNF_PACKAGES: + _respawn_dnf() module.fail_json(msg=missing_required_lib("dnf"), exception=DNF_IMP_ERR) CoprModule.ansible_module = module diff --git a/plugins/modules/cpanm.py b/plugins/modules/cpanm.py index 98f37d573e..39844d5f74 100644 --- a/plugins/modules/cpanm.py +++ b/plugins/modules/cpanm.py @@ -1,27 +1,31 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2012, Franck Cuny # Copyright (c) 2021, Alexei Znamensky # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: cpanm short_description: Manages Perl library dependencies description: - Manage Perl library dependencies using cpanminus. 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: name: type: str description: - - The Perl library to install. Valid values change according to the I(mode), see notes for more details. - - Note that for installing from a local path the parameter I(from_path) should be used. + - The Perl library to install. Valid values change according to the O(mode), see notes for more details. + - Note that for installing from a local path the parameter O(from_path) should be used. aliases: [pkg] from_path: type: path @@ -50,9 +54,27 @@ options: - Only install dependencies. type: bool default: false + install_recommendations: + description: + - If V(true), installs dependencies declared as recommends per META spec. + - If V(false), it ensures the dependencies declared as recommends are not installed, overriding any decision made earlier + in E(PERL_CPANM_OPT). + - If parameter is not set, C(cpanm) uses its existing defaults. + - When these dependencies fail to install, cpanm continues the installation, since they are just recommendation. + type: bool + version_added: 10.3.0 + install_suggestions: + description: + - If V(true), installs dependencies declared as suggests per META spec. + - If V(false), it ensures the dependencies declared as suggests are not installed, overriding any decision made earlier + in E(PERL_CPANM_OPT). + - If parameter is not set, C(cpanm) uses its existing defaults. + - When these dependencies fail to install, cpanm continues the installation, since they are just suggestion. + type: bool + version_added: 10.3.0 version: description: - - Version specification for the perl module. When I(mode) is C(new), C(cpanm) version operators are accepted. + - Version specification for the perl module. When O(mode) is V(new), C(cpanm) version operators are accepted. 
type: str executable: description: @@ -61,38 +83,37 @@ options: mode: description: - Controls the module behavior. See notes below for more details. + - The default changed from V(compatibility) to V(new) in community.general 9.0.0. + - 'O(mode=new): The O(name) parameter may refer to a module name, a distribution file, a HTTP URL or a git repository + URL as described in C(cpanminus) documentation. C(cpanm) version specifiers are recognized. This is the default mode + from community.general 9.0.0 onwards.' + - 'O(mode=compatibility): This was the default mode before community.general 9.0.0. O(name) must be either a module + name or a distribution file. If the perl module given by O(name) is installed (at the exact O(version) when specified), + then nothing happens. Otherwise, it is installed using the C(cpanm) executable. O(name) cannot be an URL, or a git + URL. C(cpanm) version specifiers do not work in this mode.' + - 'B(ATTENTION): V(compatibility) mode is deprecated and will be removed in community.general 13.0.0.' type: str choices: [compatibility, new] - default: compatibility + default: new version_added: 3.0.0 name_check: description: - - When in C(new) mode, this parameter can be used to check if there is a module I(name) installed (at I(version), when specified). + - When O(mode=new), this parameter can be used to check if there is a module O(name) installed (at O(version), when + specified). type: str version_added: 3.0.0 notes: - Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host. - - "This module now comes with a choice of execution I(mode): C(compatibility) or C(new)." - - "C(compatibility) mode:" - - When using C(compatibility) mode, the module will keep backward compatibility. This is the default mode. - - I(name) must be either a module name or a distribution file. 
- - > - If the perl module given by I(name) is installed (at the exact I(version) when specified), then nothing happens. - Otherwise, it will be installed using the C(cpanm) executable. - - I(name) cannot be an URL, or a git URL. - - C(cpanm) version specifiers do not work in this mode. - - "C(new) mode:" - - "When using C(new) mode, the module will behave differently" - - > - The I(name) parameter may refer to a module name, a distribution file, - a HTTP URL or a git repository URL as described in C(cpanminus) documentation. - - C(cpanm) version specifiers are recognized. +seealso: + - name: C(cpanm) command manual page + description: Manual page for the command. + link: https://metacpan.org/dist/App-cpanminus/view/bin/cpanm author: - "Franck Cuny (@fcuny)" - "Alexei Znamensky (@russoz)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install Dancer perl package community.general.cpanm: name: Dancer @@ -130,9 +151,20 @@ EXAMPLES = ''' community.general.cpanm: name: Dancer version: '1.0' -''' +""" + +RETURN = r""" +cpanm_version: + description: Version of CPANMinus. 
+ type: str + returned: always + sample: "1.7047" + version_added: 10.0.0 +""" + import os +import re from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper @@ -150,8 +182,10 @@ class CPANMinus(ModuleHelper): mirror=dict(type='str'), mirror_only=dict(type='bool', default=False), installdeps=dict(type='bool', default=False), + install_recommendations=dict(type='bool'), + install_suggestions=dict(type='bool'), executable=dict(type='path'), - mode=dict(type='str', choices=['compatibility', 'new'], default='compatibility'), + mode=dict(type='str', default='new', choices=['compatibility', 'new']), name_check=dict(type='str') ), required_one_of=[('name', 'from_path')], @@ -164,7 +198,10 @@ class CPANMinus(ModuleHelper): mirror=cmd_runner_fmt.as_opt_val('--mirror'), mirror_only=cmd_runner_fmt.as_bool("--mirror-only"), installdeps=cmd_runner_fmt.as_bool("--installdeps"), + install_recommendations=cmd_runner_fmt.as_bool("--with-recommends", "--without-recommends", ignore_none=True), + install_suggestions=cmd_runner_fmt.as_bool("--with-suggests", "--without-suggests", ignore_none=True), pkg_spec=cmd_runner_fmt.as_list(), + cpanm_version=cmd_runner_fmt.as_fixed("--version"), ) def __init_module__(self): @@ -172,12 +209,22 @@ class CPANMinus(ModuleHelper): if v.mode == "compatibility": if v.name_check: self.do_raise("Parameter name_check can only be used with mode=new") + self.deprecate("'mode=compatibility' is deprecated, use 'mode=new' instead", version='13.0.0', collection_name="community.general") else: if v.name and v.from_path: self.do_raise("Parameters 'name' and 'from_path' are mutually exclusive when 'mode=new'") - self.command = self.module.get_bin_path(v.executable if v.executable else self.command) - self.vars.set("binary", self.command) + self.command = v.executable if v.executable else self.command + self.runner = 
CmdRunner(self.module, self.command, self.command_args_formats, check_rc=True) + self.vars.binary = self.runner.binary + + with self.runner("cpanm_version") as ctx: + rc, out, err = ctx.run() + line = out.split('\n')[0] + match = re.search(r"version\s+([\d\.]+)\s+", line) + if not match: + self.do_raise("Failed to determine version number. First line of output: {0}".format(line)) + self.vars.cpanm_version = match.group(1) def _is_package_installed(self, name, locallib, version): def process(rc, out, err): @@ -213,8 +260,6 @@ class CPANMinus(ModuleHelper): self.do_raise(msg=err, cmd=self.vars.cmd_args) return 'is up to date' not in err and 'is up to date' not in out - runner = CmdRunner(self.module, self.command, self.command_args_formats, check_rc=True) - v = self.vars pkg_param = 'from_path' if v.from_path else 'name' @@ -228,7 +273,16 @@ class CPANMinus(ModuleHelper): return pkg_spec = self.sanitize_pkg_spec_version(v[pkg_param], v.version) - with runner(['notest', 'locallib', 'mirror', 'mirror_only', 'installdeps', 'pkg_spec'], output_process=process) as ctx: + with self.runner([ + 'notest', + 'locallib', + 'mirror', + 'mirror_only', + 'installdeps', + 'install_recommendations', + 'install_suggestions', + 'pkg_spec' + ], output_process=process) as ctx: self.changed = ctx.run(pkg_spec=pkg_spec) diff --git a/plugins/modules/cronvar.py b/plugins/modules/cronvar.py index 9299880537..b67b94fe95 100644 --- a/plugins/modules/cronvar.py +++ b/plugins/modules/cronvar.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) @@ -14,16 +13,21 @@ # This module is based on the crontab module. 
-from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: cronvar short_description: Manage variables in crontabs description: - Use this module to manage crontab variables. - This module allows you to create, update, or delete cron variable definitions. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: name: description: @@ -33,48 +37,47 @@ options: value: description: - The value to set this variable to. - - Required if I(state=present). + - Required if O(state=present). type: str insertafter: description: - - If specified, the variable will be inserted after the variable specified. - - Used with I(state=present). + - If specified, the variable is inserted after the variable specified. + - Used with O(state=present). type: str insertbefore: description: - - Used with I(state=present). If specified, the variable will be inserted - just before the variable specified. + - Used with O(state=present). If specified, the variable is inserted just before the variable specified. type: str state: description: - Whether to ensure that the variable is present or absent. type: str - choices: [ absent, present ] + choices: [absent, present] default: present user: description: - The specific user whose crontab should be modified. - - This parameter defaults to C(root) when unset. + - This parameter defaults to V(root) when unset. type: str cron_file: description: - If specified, uses this file instead of an individual user's crontab. - - Without a leading C(/), this is assumed to be in I(/etc/cron.d). - - With a leading C(/), this is taken as absolute. + - Without a leading V(/), this is assumed to be in C(/etc/cron.d). + - With a leading V(/), this is taken as absolute. 
type: str backup: description: - - If set, create a backup of the crontab before it is modified. - The location of the backup is returned in the C(backup) variable by this module. + - If set, create a backup of the crontab before it is modified. The location of the backup is returned in the C(backup) + variable by this module. type: bool default: false requirements: - cron author: -- Doug Luce (@dougluce) -''' + - Doug Luce (@dougluce) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Ensure entry like "EMAIL=doug@ansibmod.con.com" exists community.general.cronvar: name: EMAIL @@ -91,7 +94,7 @@ EXAMPLES = r''' value: /var/log/yum-autoupdate.log user: root cron_file: ansible_yum-autoupdate -''' +""" import os import platform @@ -100,9 +103,9 @@ import re import shlex import sys import tempfile +from shlex import quote as shlex_quote from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves import shlex_quote class CronVarError(Exception): @@ -130,6 +133,9 @@ class CronVar(object): self.cron_file = cron_file else: self.cron_file = os.path.join('/etc/cron.d', cron_file) + parent_dir = os.path.dirname(self.cron_file) + if parent_dir and not os.path.isdir(parent_dir): + module.fail_json(msg="Parent directory '{}' does not exist for cron_file: '{}'".format(parent_dir, cron_file)) else: self.cron_file = None @@ -141,9 +147,8 @@ class CronVar(object): if self.cron_file: # read the cronfile try: - f = open(self.cron_file, 'r') - self.lines = f.read().splitlines() - f.close() + with open(self.cron_file, 'r') as f: + self.lines = f.read().splitlines() except IOError: # cron file does not exist return @@ -175,6 +180,7 @@ class CronVar(object): fileh = open(backup_file, 'w') elif self.cron_file: fileh = open(self.cron_file, 'w') + path = None else: filed, path = tempfile.mkstemp(prefix='crontab') fileh = os.fdopen(filed, 'w') @@ -388,6 +394,8 @@ def main(): old_value = cronvar.find_variable(name) if ensure_present: + if value == "" and old_value != 
"": + value = '""' if old_value is None: cronvar.add_variable(name, value, insertbefore, insertafter) changed = True diff --git a/plugins/modules/crypttab.py b/plugins/modules/crypttab.py index a334e8ab3f..4eb8e4b6c2 100644 --- a/plugins/modules/crypttab.py +++ b/plugins/modules/crypttab.py @@ -1,63 +1,63 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2014, Steve # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: crypttab short_description: Encrypted Linux block devices description: - Control Linux encrypted block devices that are set up during system boot in C(/etc/crypttab). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: description: - - Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or - optionally prefixed with C(/dev/mapper/), as it appears in the filesystem. I(/dev/mapper/) - will be stripped from I(name). + - Name of the encrypted block device as it appears in the C(/etc/crypttab) file, or optionally prefixed with V(/dev/mapper/), + as it appears in the filesystem. V(/dev/mapper/) is stripped from O(name). type: str required: true state: description: - - Use I(present) to add a line to C(/etc/crypttab) or update its definition - if already present. - - Use I(absent) to remove a line with matching I(name). - - Use I(opts_present) to add options to those already present; options with - different values will be updated. - - Use I(opts_absent) to remove options from the existing set. + - Use V(present) to add a line to C(/etc/crypttab) or update its definition if already present. 
+ - Use V(absent) to remove a line with matching O(name). + - Use V(opts_present) to add options to those already present; options with different values are updated. + - Use V(opts_absent) to remove options from the existing set. type: str required: true - choices: [ absent, opts_absent, opts_present, present ] + choices: [absent, opts_absent, opts_present, present] backing_device: description: - - Path to the underlying block device or file, or the UUID of a block-device - prefixed with I(UUID=). + - Path to the underlying block device or file, or the UUID of a block-device prefixed with V(UUID=). type: str password: description: - - Encryption password, the path to a file containing the password, or - C(-) or unset if the password should be entered at boot. + - Encryption password, the path to a file containing the password, or V(-) or unset if the password should be entered + at boot. type: path opts: description: - - A comma-delimited list of options. See C(crypttab(5) ) for details. + - A comma-delimited list of options. See V(crypttab(5\)) for details. type: str path: description: - - Path to file to use instead of C(/etc/crypttab). + - Path to file to use instead of V(/etc/crypttab). - This might be useful in a chroot environment. 
type: path default: /etc/crypttab author: -- Steve (@groks) -''' + - Steve (@groks) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Set the options explicitly a device which must already exist community.general.crypttab: name: luks-home @@ -70,8 +70,16 @@ EXAMPLES = r''' state: opts_present opts: discard loop: '{{ ansible_mounts }}' - when: "'/dev/mapper/luks-' in {{ item.device }}" -''' + when: "'/dev/mapper/luks-' in item.device" + +- name: Add entry to /etc/crypttab for luks-home with password file + community.general.crypttab: + name: luks-home + backing_device: UUID=123e4567-e89b-12d3-a456-426614174000 + password: /root/keys/luks-home.key + opts: discard,cipher=aes-cbc-essiv:sha256 + state: present +""" import os import traceback @@ -114,7 +122,7 @@ def main(): ('backing_device', backing_device), ('password', password), ('opts', opts)): - if (arg is not None and (' ' in arg or '\t' in arg or arg == '')): + if arg is not None and (' ' in arg or '\t' in arg or arg == ''): module.fail_json(msg="invalid '%s': contains white space or is empty" % arg_name, **module.params) @@ -152,11 +160,8 @@ def main(): changed, reason = existing_line.opts.remove(opts) if changed and not module.check_mode: - try: - f = open(path, 'wb') + with open(path, 'wb') as f: f.write(to_bytes(crypttab, errors='surrogate_or_strict')) - finally: - f.close() module.exit_json(changed=changed, msg=reason, **module.params) @@ -171,12 +176,9 @@ class Crypttab(object): os.makedirs(os.path.dirname(path)) open(path, 'a').close() - try: - f = open(path, 'r') + with open(path, 'r') as f: for line in f.readlines(): self._lines.append(Line(line)) - finally: - f.close() def add(self, line): self._lines.append(line) diff --git a/plugins/modules/datadog_downtime.py b/plugins/modules/datadog_downtime.py index 8a1acf7e83..82365ff06a 100644 --- a/plugins/modules/datadog_downtime.py +++ b/plugins/modules/datadog_downtime.py @@ -1,149 +1,154 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2020, 
Datadog, Inc # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = """ ---- +DOCUMENTATION = r""" module: datadog_downtime short_description: Manages Datadog downtimes version_added: 2.0.0 description: - Manages downtimes within Datadog. - - Options as described on U(https://docs.datadoghq.com/api/v1/downtimes/s). + - Options as described on U(https://docs.datadoghq.com/api/v1/downtimes/). author: - Datadog (@Datadog) requirements: - datadog-api-client - Python 3.6+ +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - api_key: - description: - - Your Datadog API key. - required: true - type: str - api_host: - description: - - The URL to the Datadog API. - - This value can also be set with the C(DATADOG_HOST) environment variable. - required: false - default: https://api.datadoghq.com - type: str - app_key: - description: - - Your Datadog app key. - required: true - type: str - state: - description: - - The designated state of the downtime. - required: false - choices: ["present", "absent"] - default: present - type: str - id: - description: - - The identifier of the downtime. - - If empty, a new downtime gets created, otherwise it is either updated or deleted depending of the C(state). - - To keep your playbook idempotent, you should save the identifier in a file and read it in a lookup. - type: int + api_key: + description: + - Your Datadog API key. + required: true + type: str + api_host: + description: + - The URL to the Datadog API. + - This value can also be set with the E(DATADOG_HOST) environment variable. + required: false + default: https://api.datadoghq.com + type: str + app_key: + description: + - Your Datadog app key. 
+ required: true + type: str + state: + description: + - The designated state of the downtime. + required: false + choices: ["present", "absent"] + default: present + type: str + id: + description: + - The identifier of the downtime. + - If empty, a new downtime gets created, otherwise it is either updated or deleted depending of the O(state). + - To keep your playbook idempotent, you should save the identifier in a file and read it in a lookup. + type: int + monitor_tags: + description: + - A list of monitor tags to which the downtime applies. + - The resulting downtime applies to monitors that match ALL provided monitor tags. + type: list + elements: str + scope: + description: + - A list of scopes to which the downtime applies. + - The resulting downtime applies to sources that matches ALL provided scopes. + type: list + elements: str + monitor_id: + description: + - The ID of the monitor to mute. If not provided, the downtime applies to all monitors. + type: int + downtime_message: + description: + - A message to include with notifications for this downtime. + - Email notifications can be sent to specific users by using the same "@username" notation as events. + type: str + start: + type: int + description: + - POSIX timestamp to start the downtime. If not provided, the downtime starts the moment it is created. + end: + type: int + description: + - POSIX timestamp to end the downtime. If not provided, the downtime is in effect until you cancel it. + timezone: + description: + - The timezone for the downtime. + type: str + rrule: + description: + - The C(RRULE) standard for defining recurring events. + - For example, to have a recurring event on the first day of each month, select a type of rrule and set the C(FREQ) + to C(MONTHLY) and C(BYMONTHDAY) to C(1). + - Most common rrule options from the iCalendar Spec are supported. + - Attributes specifying the duration in C(RRULE) are not supported (for example C(DTSTART), C(DTEND), C(DURATION)). 
+ type: str +""" + +EXAMPLES = r""" +- name: Create a downtime + register: downtime_var + community.general.datadog_downtime: + state: present monitor_tags: - description: - - A list of monitor tags to which the downtime applies. - - The resulting downtime applies to monitors that match ALL provided monitor tags. - type: list - elements: str - scope: - description: - - A list of scopes to which the downtime applies. - - The resulting downtime applies to sources that matches ALL provided scopes. - type: list - elements: str - monitor_id: - description: - - The ID of the monitor to mute. If not provided, the downtime applies to all monitors. - type: int - downtime_message: - description: - - A message to include with notifications for this downtime. - - Email notifications can be sent to specific users by using the same "@username" notation as events. - type: str - start: - type: int - description: - - POSIX timestamp to start the downtime. If not provided, the downtime starts the moment it is created. - end: - type: int - description: - - POSIX timestamp to end the downtime. If not provided, the downtime is in effect until you cancel it. - timezone: - description: - - The timezone for the downtime. - type: str - rrule: - description: - - The C(RRULE) standard for defining recurring events. - - For example, to have a recurring event on the first day of each month, - select a type of rrule and set the C(FREQ) to C(MONTHLY) and C(BYMONTHDAY) to C(1). - - Most common rrule options from the iCalendar Spec are supported. - - Attributes specifying the duration in C(RRULE) are not supported (e.g. C(DTSTART), C(DTEND), C(DURATION)). 
- type: str + - "foo:bar" + downtime_message: "Downtime for foo:bar" + scope: "test" + api_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + app_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + # Lookup the id in the file and ignore errors if the file doesn't exits, so downtime gets created + id: "{{ lookup('file', inventory_hostname ~ '_downtime_id.txt', errors='ignore') }}" +- name: Save downtime id to file for later updates and idempotence + delegate_to: localhost + copy: + content: "{{ downtime.downtime.id }}" + dest: "{{ inventory_hostname ~ '_downtime_id.txt' }}" """ -EXAMPLES = """ - - name: Create a downtime - register: downtime_var - community.general.datadog_downtime: - state: present - monitor_tags: - - "foo:bar" - downtime_message: "Downtime for foo:bar" - scope: "test" - api_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" - app_key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" - # Lookup the id in the file and ignore errors if the file doesn't exits, so downtime gets created - id: "{{ lookup('file', inventory_hostname ~ '_downtime_id.txt', errors='ignore') }}" - - name: Save downtime id to file for later updates and idempotence - delegate_to: localhost - copy: - content: "{{ downtime.downtime.id }}" - dest: "{{ inventory_hostname ~ '_downtime_id.txt' }}" -""" - -RETURN = """ +RETURN = r""" # Returns the downtime JSON dictionary from the API response under the C(downtime) key. # See https://docs.datadoghq.com/api/v1/downtimes/#schedule-a-downtime for more details. downtime: - description: The downtime returned by the API. 
- type: dict - returned: always - sample: { - "active": true, - "canceled": null, - "creator_id": 1445416, - "disabled": false, - "downtime_type": 2, - "end": null, - "id": 1055751000, - "message": "Downtime for foo:bar", - "monitor_id": null, - "monitor_tags": [ - "foo:bar" - ], - "parent_id": null, - "recurrence": null, - "scope": [ - "test" - ], - "start": 1607015009, - "timezone": "UTC", - "updater_id": null + description: The downtime returned by the API. + type: dict + returned: always + sample: + { + "active": true, + "canceled": null, + "creator_id": 1445416, + "disabled": false, + "downtime_type": 2, + "end": null, + "id": 1055751000, + "message": "Downtime for foo:bar", + "monitor_id": null, + "monitor_tags": [ + "foo:bar" + ], + "parent_id": null, + "recurrence": null, + "scope": [ + "test" + ], + "start": 1607015009, + "timezone": "UTC", + "updater_id": null } """ @@ -168,18 +173,18 @@ def main(): module = AnsibleModule( argument_spec=dict( api_key=dict(required=True, no_log=True), - api_host=dict(required=False, default="https://api.datadoghq.com"), + api_host=dict(default="https://api.datadoghq.com"), app_key=dict(required=True, no_log=True), - state=dict(required=False, choices=["present", "absent"], default="present"), - monitor_tags=dict(required=False, type="list", elements="str"), - scope=dict(required=False, type="list", elements="str"), - monitor_id=dict(required=False, type="int"), - downtime_message=dict(required=False, no_log=True), - start=dict(required=False, type="int"), - end=dict(required=False, type="int"), - timezone=dict(required=False, type="str"), - rrule=dict(required=False, type="str"), - id=dict(required=False, type="int"), + state=dict(choices=["present", "absent"], default="present"), + monitor_tags=dict(type="list", elements="str"), + scope=dict(type="list", elements="str"), + monitor_id=dict(type="int"), + downtime_message=dict(no_log=True), + start=dict(type="int"), + end=dict(type="int"), + timezone=dict(type="str"), + 
rrule=dict(type="str"), + id=dict(type="int"), ) ) @@ -241,7 +246,8 @@ def build_downtime(module): downtime.timezone = module.params["timezone"] if module.params["rrule"]: downtime.recurrence = DowntimeRecurrence( - rrule=module.params["rrule"] + rrule=module.params["rrule"], + type="rrule", ) return downtime diff --git a/plugins/modules/datadog_event.py b/plugins/modules/datadog_event.py index 0a669c4a81..c34951992e 100644 --- a/plugins/modules/datadog_event.py +++ b/plugins/modules/datadog_event.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Author: Artūras 'arturaz' Šlajus # Author: Naoya Nakazawa @@ -10,78 +9,91 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: datadog_event -short_description: Posts events to Datadog service +short_description: Posts events to Datadog service description: -- "Allows to post events to Datadog (www.datadoghq.com) service." -- "Uses http://docs.datadoghq.com/api/#events API." + - Allows to post events to Datadog (www.datadoghq.com) service. + - Uses http://docs.datadoghq.com/api/#events API. 
author: -- "Artūras 'arturaz' Šlajus (@arturaz)" -- "Naoya Nakazawa (@n0ts)" + - "Artūras 'arturaz' Šlajus (@arturaz)" + - "Naoya Nakazawa (@n0ts)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - api_key: - type: str - description: ["Your DataDog API key."] - required: true - app_key: - type: str - description: ["Your DataDog app key."] - required: true - title: - type: str - description: ["The event title."] - required: true - text: - type: str - description: ["The body of the event."] - required: true - date_happened: - type: int - description: - - POSIX timestamp of the event. - - Default value is now. - priority: - type: str - description: ["The priority of the event."] - default: normal - choices: [normal, low] - host: - type: str - description: - - Host name to associate with the event. - - If not specified, it defaults to the remote system's hostname. - api_host: - type: str - description: - - DataDog API endpoint URL. - version_added: '3.3.0' - tags: - type: list - elements: str - description: ["Comma separated list of tags to apply to the event."] - alert_type: - type: str - description: ["Type of alert."] - default: info - choices: ['error', 'warning', 'info', 'success'] - aggregation_key: - type: str - description: ["An arbitrary string to use for aggregation."] - validate_certs: - description: - - If C(false), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - type: bool - default: true -''' + api_key: + type: str + description: + - Your DataDog API key. + required: true + app_key: + type: str + description: + - Your DataDog app key. + required: true + title: + type: str + description: + - The event title. + required: true + text: + type: str + description: + - The body of the event. 
+ required: true + date_happened: + type: int + description: + - POSIX timestamp of the event. + - Default value is now. + priority: + type: str + description: + - The priority of the event. + default: normal + choices: [normal, low] + host: + type: str + description: + - Host name to associate with the event. + - If not specified, it defaults to the remote system's hostname. + api_host: + type: str + description: + - DataDog API endpoint URL. + version_added: '3.3.0' + tags: + type: list + elements: str + description: + - Comma separated list of tags to apply to the event. + alert_type: + type: str + description: + - Type of alert. + default: info + choices: ['error', 'warning', 'info', 'success'] + aggregation_key: + type: str + description: + - An arbitrary string to use for aggregation. + validate_certs: + description: + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. + type: bool + default: true +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Post an event with low priority community.general.datadog_event: title: Testing from ansible @@ -109,8 +121,7 @@ EXAMPLES = ''' - aa - b - '#host:{{ inventory_hostname }}' - -''' +""" import platform import traceback diff --git a/plugins/modules/datadog_monitor.py b/plugins/modules/datadog_monitor.py index ef6aa84255..2b84d7dbd8 100644 --- a/plugins/modules/datadog_monitor.py +++ b/plugins/modules/datadog_monitor.py @@ -1,173 +1,207 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2015, Sebastian Kornehl # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: datadog_monitor short_description: Manages Datadog monitors description: - Manages 
monitors within Datadog. - Options as described on https://docs.datadoghq.com/api/. - - The type C(event-v2) was added in community.general 4.8.0. author: Sebastian Kornehl (@skornehl) requirements: [datadog] +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - api_key: - description: - - Your Datadog API key. - required: true - type: str - api_host: - description: - - The URL to the Datadog API. Default value is C(https://api.datadoghq.com). - - This value can also be set with the C(DATADOG_HOST) environment variable. - required: false - type: str - version_added: '0.2.0' - app_key: - description: - - Your Datadog app key. - required: true - type: str - state: - description: - - The designated state of the monitor. - required: true - choices: ['present', 'absent', 'mute', 'unmute'] - type: str - tags: - description: - - A list of tags to associate with your monitor when creating or updating. - - This can help you categorize and filter monitors. - type: list - elements: str - type: - description: - - The type of the monitor. - - The types C(query alert), C(trace-analytics alert) and C(rum alert) were added in community.general 2.1.0. - - The type C(composite) was added in community.general 3.4.0. - choices: - - metric alert - - service check - - event alert - - event-v2 alert - - process alert - - log alert - - query alert - - trace-analytics alert - - rum alert - - composite - type: str - query: - description: - - The monitor query to notify on. - - Syntax varies depending on what type of monitor you are creating. - type: str - name: - description: - - The name of the alert. - required: true - type: str - notification_message: - description: - - A message to include with notifications for this monitor. - - Email notifications can be sent to specific users by using the same '@username' notation as events. 
- - Monitor message template variables can be accessed by using double square brackets, i.e '[[' and ']]'. - type: str - silenced: - type: dict - description: - - Dictionary of scopes to silence, with timestamps or None. - - Each scope will be muted until the given POSIX timestamp or forever if the value is None. - notify_no_data: - description: - - Whether this monitor will notify when data stops reporting. - type: bool - default: false - no_data_timeframe: - description: - - The number of minutes before a monitor will notify when data stops reporting. - - Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks. - - If not specified, it defaults to 2x timeframe for metric, 2 minutes for service. - type: str - timeout_h: - description: - - The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state. - type: str - renotify_interval: - description: - - The number of minutes after the last notification before a monitor will re-notify on the current status. - - It will only re-notify if it is not resolved. - type: str - escalation_message: - description: - - A message to include with a re-notification. Supports the '@username' notification we allow elsewhere. - - Not applicable if I(renotify_interval=None). - type: str - notify_audit: - description: - - Whether tagged users will be notified on changes to this monitor. - type: bool - default: false - thresholds: - type: dict - description: - - A dictionary of thresholds by status. - - Only available for service checks and metric alerts. - - Because each of them can have multiple thresholds, we do not define them directly in the query. - - "If not specified, it defaults to: C({'ok': 1, 'critical': 1, 'warning': 1})." - locked: - description: - - Whether changes to this monitor should be restricted to the creator or admins. 
- type: bool - default: false - require_full_window: - description: - - Whether this monitor needs a full window of data before it gets evaluated. - - We highly recommend you set this to False for sparse metrics, otherwise some evaluations will be skipped. - type: bool - new_host_delay: - description: - - A positive integer representing the number of seconds to wait before evaluating the monitor for new hosts. - - This gives the host time to fully initialize. - type: str - evaluation_delay: - description: - - Time to delay evaluation (in seconds). - - Effective for sparse values. - type: str - id: - description: - - The ID of the alert. - - If set, will be used instead of the name to locate the alert. - type: str - include_tags: - description: - - Whether notifications from this monitor automatically inserts its triggering tags into the title. - type: bool - default: true - version_added: 1.3.0 - priority: - description: - - Integer from 1 (high) to 5 (low) indicating alert severity. - type: int - version_added: 4.6.0 -''' + api_key: + description: + - Your Datadog API key. + required: true + type: str + api_host: + description: + - The URL to the Datadog API. Default value is V(https://api.datadoghq.com). + - This value can also be set with the E(DATADOG_HOST) environment variable. + required: false + type: str + version_added: '0.2.0' + app_key: + description: + - Your Datadog app key. + required: true + type: str + state: + description: + - The designated state of the monitor. + required: true + choices: ['present', 'absent', 'mute', 'unmute'] + type: str + tags: + description: + - A list of tags to associate with your monitor when creating or updating. + - This can help you categorize and filter monitors. + type: list + elements: str + type: + description: + - The type of the monitor. + - The types V(query alert), V(trace-analytics alert) and V(rum alert) were added in community.general 2.1.0. + - The type V(composite) was added in community.general 3.4.0. 
+ - The type V(event-v2 alert) was added in community.general 4.8.0. + choices: + - metric alert + - service check + - event alert + - event-v2 alert + - process alert + - log alert + - query alert + - trace-analytics alert + - rum alert + - composite + type: str + query: + description: + - The monitor query to notify on. + - Syntax varies depending on what type of monitor you are creating. + type: str + name: + description: + - The name of the alert. + required: true + type: str + notification_message: + description: + - A message to include with notifications for this monitor. + - Email notifications can be sent to specific users by using the same '@username' notation as events. + - Monitor message template variables can be accessed by using double square brackets, in other words C([[) and C(]]). + type: str + silenced: + type: dict + description: + - Dictionary of scopes to silence, with timestamps or None. + - Each scope is muted until the given POSIX timestamp or forever if the value is V(None). + notify_no_data: + description: + - Whether this monitor notifies when data stops reporting. + type: bool + default: false + no_data_timeframe: + description: + - The number of minutes before a monitor notifies when data stops reporting. + - Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks. + - If not specified, it defaults to 2x timeframe for metric, 2 minutes for service. + type: str + timeout_h: + description: + - The number of hours of the monitor not reporting data before it automatically resolves from a triggered state. + type: str + renotify_interval: + description: + - The number of minutes after the last notification before a monitor re-notifies on the current status. + - It only re-notifies if it is not resolved. + type: str + escalation_message: + description: + - A message to include with a re-notification. Supports the '@username' notification we allow elsewhere. + - Not applicable if O(renotify_interval=none). 
+    type: str
+  notify_audit:
+    description:
+      - Whether tagged users are notified on changes to this monitor.
+    type: bool
+    default: false
+  thresholds:
+    type: dict
+    description:
+      - A dictionary of thresholds by status.
+      - Only available for service checks and metric alerts.
+      - Because each of them can have multiple thresholds, we do not define them directly in the query.
+      - "If not specified, it defaults to: V({'ok': 1, 'critical': 1, 'warning': 1})."
+  locked:
+    description:
+      - Whether changes to this monitor should be restricted to the creator or admins.
+    type: bool
+    default: false
+  require_full_window:
+    description:
+      - Whether this monitor needs a full window of data before it gets evaluated.
+      - We highly recommend you set this to V(false) for sparse metrics, otherwise some evaluations are skipped.
+    type: bool
+  new_host_delay:
+    description:
+      - A positive integer representing the number of seconds to wait before evaluating the monitor for new hosts.
+      - This gives the host time to fully initialize.
+    type: str
+  evaluation_delay:
+    description:
+      - Time to delay evaluation (in seconds).
+      - Effective for sparse values.
+    type: str
+  id:
+    description:
+      - The ID of the alert.
+      - If set, it is used instead of O(name) to locate the alert.
+    type: str
+  include_tags:
+    description:
+      - Whether notifications from this monitor automatically insert its triggering tags into the title.
+    type: bool
+    default: true
+    version_added: 1.3.0
+  priority:
+    description:
+      - Integer from V(1) (high) to V(5) (low) indicating alert severity.
+    type: int
+    version_added: 4.6.0
+  notification_preset_name:
+    description:
+      - Toggles the display of additional content sent in the monitor notification.
+ choices: + - show_all + - hide_query + - hide_handles + - hide_all + type: str + version_added: 7.1.0 + renotify_occurrences: + description: + - The number of times re-notification messages should be sent on the current status at the provided re-notification + interval. + type: int + version_added: 7.1.0 + renotify_statuses: + description: + - The types of monitor statuses for which re-notification messages are sent. + choices: + - alert + - warn + - no data + type: list + elements: str + version_added: 7.1.0 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a metric monitor community.general.datadog_monitor: type: "metric alert" name: "Test monitor" state: "present" + renotify_interval: 30 + renotify_occurrences: 1 + renotify_statuses: ["warn"] + notification_preset_name: "show_all" query: "datadog.agent.up.over('host:host1').last(2).count_by_status()" notification_message: "Host [[host.name]] with IP [[host.ip]] is failing to report to datadog." api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" @@ -202,7 +236,8 @@ EXAMPLES = ''' api_host: https://api.datadoghq.eu api_key: "9775a026f1ca7d1c6c5af9d94d9595a4" app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff" -''' +""" + import traceback # Import Datadog @@ -238,15 +273,18 @@ def main(): renotify_interval=dict(), escalation_message=dict(), notify_audit=dict(default=False, type='bool'), - thresholds=dict(type='dict', default=None), - tags=dict(type='list', elements='str', default=None), + thresholds=dict(type='dict'), + tags=dict(type='list', elements='str'), locked=dict(default=False, type='bool'), require_full_window=dict(type='bool'), new_host_delay=dict(), evaluation_delay=dict(), id=dict(), - include_tags=dict(required=False, default=True, type='bool'), + include_tags=dict(default=True, type='bool'), priority=dict(type='int'), + notification_preset_name=dict(choices=['show_all', 'hide_query', 'hide_handles', 'hide_all']), + renotify_occurrences=dict(type='int'), + renotify_statuses=dict(type='list', 
elements='str', choices=['alert', 'warn', 'no data']), ) ) @@ -361,6 +399,9 @@ def install_monitor(module): "new_host_delay": module.params['new_host_delay'], "evaluation_delay": module.params['evaluation_delay'], "include_tags": module.params['include_tags'], + "notification_preset_name": module.params['notification_preset_name'], + "renotify_occurrences": module.params['renotify_occurrences'], + "renotify_statuses": module.params['renotify_statuses'], } if module.params['type'] == "service check": @@ -392,7 +433,7 @@ def mute_monitor(module): module.fail_json(msg="Monitor %s not found!" % module.params['name']) elif monitor['options']['silenced']: module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.") - elif (module.params['silenced'] is not None and len(set(monitor['options']['silenced']) ^ set(module.params['silenced'])) == 0): + elif module.params['silenced'] is not None and len(set(monitor['options']['silenced']) ^ set(module.params['silenced'])) == 0: module.exit_json(changed=False) try: if module.params['silenced'] is None or module.params['silenced'] == "": diff --git a/plugins/modules/dconf.py b/plugins/modules/dconf.py index 61bf6f0e3f..e9e9d82514 100644 --- a/plugins/modules/dconf.py +++ b/plugins/modules/dconf.py @@ -1,49 +1,52 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017, Branko Majic # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: dconf author: - - "Branko Majic (@azaghal)" + - "Branko Majic (@azaghal)" short_description: Modify and read dconf database description: - - This module allows modifications and reading of C(dconf) database. 
The module - is implemented as a wrapper around C(dconf) tool. Please see the dconf(1) man - page for more details. - - Since C(dconf) requires a running D-Bus session to change values, the module - will try to detect an existing session and reuse it, or run the tool via - C(dbus-run-session). + - This module allows modifications and reading of C(dconf) database. The module is implemented as a wrapper around C(dconf) + tool. Please see the dconf(1) man page for more details. + - Since C(dconf) requires a running D-Bus session to change values, the module tries to detect an existing session and reuse + it, or run the tool using C(dbus-run-session). +requirements: + - Optionally the C(gi.repository) Python library (usually included in the OS on hosts which have C(dconf)); this is to become + a non-optional requirement in a future major release of community.general. notes: - - This module depends on C(psutil) Python library (version 4.0.0 and upwards), - C(dconf), C(dbus-send), and C(dbus-run-session) binaries. Depending on - distribution you are using, you may need to install additional packages to - have these available. - - Detection of existing, running D-Bus session, required to change settings - via C(dconf), is not 100% reliable due to implementation details of D-Bus - daemon itself. This might lead to running applications not picking-up - changes on the fly if options are changed via Ansible and - C(dbus-run-session). - - Keep in mind that the C(dconf) CLI tool, which this module wraps around, - utilises an unusual syntax for the values (GVariant). For example, if you - wanted to provide a string value, the correct syntax would be - I(value="'myvalue'") - with single quotes as part of the Ansible parameter - value. - - When using loops in combination with a value like - :code:`"[('xkb', 'us'), ('xkb', 'se')]"`, you need to be aware of possible - type conversions. 
Applying a filter :code:`"{{ item.value | string }}"` - to the parameter variable can avoid potential conversion problems. - - The easiest way to figure out exact syntax/value you need to provide for a - key is by making the configuration change in application affected by the - key, and then having a look at value set via commands C(dconf dump - /path/to/dir/) or C(dconf read /path/to/key). + - This module depends on C(psutil) Python library (version 4.0.0 and upwards), C(dconf), C(dbus-send), and C(dbus-run-session) + binaries. Depending on distribution you are using, you may need to install additional packages to have these available. + - This module uses the C(gi.repository) Python library when available for accurate comparison of values in C(dconf) to values + specified in Ansible code. C(gi.repository) is likely to be present on most systems which have C(dconf) but may not be + present everywhere. When it is missing, a simple string comparison between values is used, and there may be false positives, + that is, Ansible may think that a value is being changed when it is not. This fallback is to be removed in a future version + of this module, at which point the module will stop working on hosts without C(gi.repository). + - Detection of existing, running D-Bus session, required to change settings using C(dconf), is not 100% reliable due to + implementation details of D-Bus daemon itself. This might lead to running applications not picking-up changes on-the-fly + if options are changed using Ansible and C(dbus-run-session). + - Keep in mind that the C(dconf) CLI tool, which this module wraps around, utilises an unusual syntax for the values (GVariant). + For example, if you wanted to provide a string value, the correct syntax would be O(value="'myvalue'") - with single quotes + as part of the Ansible parameter value. + - When using loops in combination with a value like V("[('xkb', 'us'\), ('xkb', 'se'\)]"), you need to be aware of possible + type conversions. 
Applying a filter V({{ item.value | string }}) to the parameter variable can avoid potential conversion + problems. + - The easiest way to figure out exact syntax/value you need to provide for a key is by making the configuration change in + application affected by the key, and then having a look at value set using commands C(dconf dump /path/to/dir/) or C(dconf + read /path/to/key). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: key: type: str @@ -51,28 +54,30 @@ options: description: - A dconf key to modify or read from the dconf database. value: - type: str + type: raw required: false description: - - Value to set for the specified dconf key. Value should be specified in - GVariant format. Due to complexity of this format, it is best to have a - look at existing values in the dconf database. - - Required for I(state=present). + - Value to set for the specified dconf key. Value should be specified in GVariant format. Due to complexity of this + format, it is best to have a look at existing values in the dconf database. + - Required for O(state=present). + - Although the type is specified as "raw", it should typically be specified as a string. However, boolean values in + particular are handled properly even when specified as booleans rather than strings (in fact, handling booleans properly + is why the type of this parameter is "raw"). state: type: str required: false default: present - choices: [ 'read', 'present', 'absent' ] + choices: ['read', 'present', 'absent'] description: - The action to take upon the key/value. -''' +""" RETURN = r""" value: - description: value associated with the requested key - returned: success, state was "read" - type: str - sample: "'Default'" + description: Value associated with the requested key. 
+ returned: success, state was "read" + type: str + sample: "'Default'" """ EXAMPLES = r""" @@ -119,17 +124,27 @@ EXAMPLES = r""" import os -import traceback +import sys + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.respawn import ( + has_respawned, + probe_interpreters_for_module, + respawn_module, +) +from ansible.module_utils.common.text.converters import to_native +from ansible_collections.community.general.plugins.module_utils import deps + +glib_module_name = 'gi.repository.GLib' -PSUTIL_IMP_ERR = None try: - import psutil - HAS_PSUTIL = True + from gi.repository.GLib import Variant, GError except ImportError: - PSUTIL_IMP_ERR = traceback.format_exc() - HAS_PSUTIL = False + Variant = None + GError = AttributeError -from ansible.module_utils.basic import AnsibleModule, missing_required_lib +with deps.declare("psutil"): + import psutil class DBusWrapper(object): @@ -251,6 +266,29 @@ class DconfPreference(object): # Check if dconf binary exists self.dconf_bin = self.module.get_bin_path('dconf', required=True) + @staticmethod + def variants_are_equal(canonical_value, user_value): + """Compare two string GVariant representations for equality. + + Assumes `canonical_value` is "canonical" in the sense that the type of + the variant is specified explicitly if it cannot be inferred; this is + true for textual representations of variants generated by the `dconf` + command. The type of `canonical_value` is used to parse `user_value`, + so the latter does not need to be explicitly typed. + + Returns True if the two values are equal. + """ + if canonical_value is None: + # It's unset in dconf database, so anything the user is trying to + # set is a change. 
+ return False + try: + variant1 = Variant.parse(None, canonical_value) + variant2 = Variant.parse(variant1.get_type(), user_value) + return variant1 == variant2 + except GError: + return canonical_value == user_value + def read(self, key): """ Retrieves current value associated with the dconf key. @@ -291,7 +329,7 @@ class DconfPreference(object): """ # If no change is needed (or won't be done due to check_mode), notify # caller straight away. - if value == self.read(key): + if self.variants_are_equal(self.read(key), value): return False elif self.check_mode: return True @@ -305,7 +343,7 @@ class DconfPreference(object): rc, out, err = dbus_wrapper.run_command(command) if rc != 0: - self.module.fail_json(msg='dconf failed while write the value with error: %s' % err, + self.module.fail_json(msg='dconf failed while writing key %s, value %s with error: %s' % (key, value, err), out=out, err=err) @@ -343,7 +381,7 @@ class DconfPreference(object): rc, out, err = dbus_wrapper.run_command(command) if rc != 0: - self.module.fail_json(msg='dconf failed while reseting the value with error: %s' % err, + self.module.fail_json(msg='dconf failed while resetting the value with error: %s' % err, out=out, err=err) @@ -357,17 +395,61 @@ def main(): argument_spec=dict( state=dict(default='present', choices=['present', 'absent', 'read']), key=dict(required=True, type='str', no_log=False), - value=dict(required=False, default=None, type='str'), + # Converted to str below after special handling of bool. + value=dict(type='raw'), ), - supports_check_mode=True + supports_check_mode=True, + required_if=[ + ('state', 'present', ['value']), + ], ) - if not HAS_PSUTIL: - module.fail_json(msg=missing_required_lib("psutil"), exception=PSUTIL_IMP_ERR) + if Variant is None: + # This interpreter can't see the GLib module. To try to fix that, we'll + # look in common locations for system-owned interpreters that can see + # it; if we find one, we'll respawn under it. 
Otherwise we'll proceed + # with degraded performance, without the ability to parse GVariants. + # Later (in a different PR) we'll actually deprecate this degraded + # performance level and fail with an error if the library can't be + # found. - # If present state was specified, value must be provided. - if module.params['state'] == 'present' and module.params['value'] is None: - module.fail_json(msg='State "present" requires "value" to be set.') + if has_respawned(): + # This shouldn't be possible; short-circuit early if it happens. + module.fail_json( + msg="%s must be installed and visible from %s." % + (glib_module_name, sys.executable)) + + interpreters = ['/usr/bin/python3', '/usr/bin/python'] + + interpreter = probe_interpreters_for_module( + interpreters, glib_module_name) + + if interpreter: + # Found the Python bindings; respawn this module under the + # interpreter where we found them. + respawn_module(interpreter) + # This is the end of the line for this process, it will exit here + # once the respawned module has completed. + + # Try to be forgiving about the user specifying a boolean as the value, or + # more accurately about the fact that YAML and Ansible are quite insistent + # about converting strings that look like booleans into booleans. Convert + # the boolean into a string of the type dconf will understand. Any type for + # the value other than boolean is just converted into a string directly. + if module.params['value'] is not None: + if isinstance(module.params['value'], bool): + module.params['value'] = 'true' if module.params['value'] else 'false' + else: + module.params['value'] = to_native( + module.params['value'], errors='surrogate_or_strict') + + if Variant is None: + module.warn( + 'WARNING: The gi.repository Python library is not available; ' + 'using string comparison to check value equality. This fallback ' + 'will be deprecated in a future version of community.general.') + + deps.validate(module) # Create wrapper instance. 
dconf = DconfPreference(module, module.check_mode) diff --git a/plugins/modules/decompress.py b/plugins/modules/decompress.py new file mode 100644 index 0000000000..3746810ca9 --- /dev/null +++ b/plugins/modules/decompress.py @@ -0,0 +1,201 @@ +#!/usr/bin/python + +# Copyright (c) 2024, Stanislav Shamilov +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: decompress +short_description: Decompresses compressed files +version_added: 10.1.0 +description: + - Decompresses compressed files. + - The source (compressed) file and destination (decompressed) files are on the remote host. + - Source file can be deleted after decompression. +extends_documentation_fragment: + - ansible.builtin.files + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + src: + description: + - Remote absolute path for the file to decompress. + type: path + required: true + dest: + description: + - The file name of the destination file where the compressed file is decompressed. + - If the destination file exists, it is truncated and overwritten. + - If not specified, the destination filename is derived from O(src) by removing the compression format extension. For + example, when O(src) is V(/path/to/file.txt.gz) and O(format) is V(gz), O(dest) is V(/path/to/file.txt). If the O(src) + file does not have an extension for the current O(format), the O(dest) filename is made by appending C(_decompressed) + to the O(src) filename. For instance, when O(src) is V(/path/to/file.myextension), the (dest) filename is V(/path/to/file.myextension_decompressed). + type: path + format: + description: + - The type of compression to use to decompress. 
+ type: str + choices: [gz, bz2, xz] + default: gz + remove: + description: + - Remove original compressed file after decompression. + type: bool + default: false +requirements: + - Requires C(lzma) (standard library of Python 3) if using C(xz) format. +author: + - Stanislav Shamilov (@shamilovstas) +""" + +EXAMPLES = r""" +- name: Decompress file /path/to/file.txt.gz into /path/to/file.txt (gz compression is used by default) + community.general.decompress: + src: /path/to/file.txt.gz + dest: /path/to/file.txt + +- name: Decompress file /path/to/file.txt.gz into /path/to/file.txt + community.general.decompress: + src: /path/to/file.txt.gz + +- name: Decompress file compressed with bzip2 + community.general.decompress: + src: /path/to/file.txt.bz2 + dest: /path/to/file.bz2 + format: bz2 + +- name: Decompress file and delete the compressed file afterwards + community.general.decompress: + src: /path/to/file.txt.gz + dest: /path/to/file.txt + remove: true +""" + +RETURN = r""" +dest: + description: Path to decompressed file. 
+ type: str + returned: success + sample: /path/to/file.txt +""" + +import bz2 +import filecmp +import gzip +import os +import shutil +import tempfile + +from ansible_collections.community.general.plugins.module_utils.mh.module_helper import ModuleHelper +from ansible.module_utils.common.text.converters import to_native, to_bytes +from ansible_collections.community.general.plugins.module_utils import deps + +with deps.declare("lzma"): + import lzma + + +def lzma_decompress(src): + return lzma.open(src, "rb") + + +def bz2_decompress(src): + return bz2.open(src, "rb") + + +def gzip_decompress(src): + return gzip.open(src, "rb") + + +def decompress(b_src, b_dest, handler): + with handler(b_src) as src_file: + with open(b_dest, "wb") as dest_file: + shutil.copyfileobj(src_file, dest_file) + + +class Decompress(ModuleHelper): + destination_filename_template = "%s_decompressed" + output_params = 'dest' + + module = dict( + argument_spec=dict( + src=dict(type='path', required=True), + dest=dict(type='path'), + format=dict(type='str', default='gz', choices=['gz', 'bz2', 'xz']), + remove=dict(type='bool', default=False) + ), + add_file_common_args=True, + supports_check_mode=True + ) + + def __init_module__(self): + self.handlers = {"gz": gzip_decompress, "bz2": bz2_decompress, "xz": lzma_decompress} + if self.vars.dest is None: + self.vars.dest = self.get_destination_filename() + deps.validate(self.module) + self.configure() + + def configure(self): + b_dest = to_bytes(self.vars.dest, errors='surrogate_or_strict') + b_src = to_bytes(self.vars.src, errors='surrogate_or_strict') + if not os.path.exists(b_src): + if self.vars.remove and os.path.exists(b_dest): + self.module.exit_json(changed=False) + else: + self.do_raise(msg="Path does not exist: '%s'" % b_src) + if os.path.isdir(b_src): + self.do_raise(msg="Cannot decompress directory '%s'" % b_src) + if os.path.isdir(b_dest): + self.do_raise(msg="Destination is a directory, cannot decompress: '%s'" % b_dest) + + def 
__run__(self): + b_dest = to_bytes(self.vars.dest, errors='surrogate_or_strict') + b_src = to_bytes(self.vars.src, errors='surrogate_or_strict') + + file_args = self.module.load_file_common_arguments(self.module.params, path=self.vars.dest) + handler = self.handlers[self.vars.format] + try: + tempfd, temppath = tempfile.mkstemp(dir=self.module.tmpdir) + self.module.add_cleanup_file(temppath) + b_temppath = to_bytes(temppath, errors='surrogate_or_strict') + decompress(b_src, b_temppath, handler) + except OSError as e: + self.do_raise(msg="Unable to create temporary file '%s'" % to_native(e)) + + if os.path.exists(b_dest): + self.changed = not filecmp.cmp(b_temppath, b_dest, shallow=False) + else: + self.changed = True + + if self.changed and not self.module.check_mode: + try: + self.module.atomic_move(b_temppath, b_dest) + except OSError: + self.do_raise(msg="Unable to move temporary file '%s' to '%s'" % (b_temppath, self.vars.dest)) + + if self.vars.remove and not self.check_mode: + os.remove(b_src) + self.changed = self.module.set_fs_attributes_if_different(file_args, self.changed) + + def get_destination_filename(self): + src = self.vars.src + fmt_extension = ".%s" % self.vars.format + if src.endswith(fmt_extension) and len(src) > len(fmt_extension): + filename = src[:-len(fmt_extension)] + else: + filename = Decompress.destination_filename_template % src + return filename + + +def main(): + Decompress.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/deploy_helper.py b/plugins/modules/deploy_helper.py index afa63cba19..d9380d36f4 100644 --- a/plugins/modules/deploy_helper.py +++ b/plugins/modules/deploy_helper.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2014, Jasper N. 
Brouwer # Copyright (c) 2014, Ramon de la Fuente @@ -7,29 +6,27 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: deploy_helper author: "Ramon de la Fuente (@ramondelafuente)" short_description: Manages some of the steps common in deploying projects description: - - The Deploy Helper manages some of the steps common in deploying software. - It creates a folder structure, manages a symlink for the current release - and cleans up old releases. - - "Running it with the I(state=query) or I(state=present) will return the C(deploy_helper) fact. - C(project_path), whatever you set in the I(path) parameter, - C(current_path), the path to the symlink that points to the active release, - C(releases_path), the path to the folder to keep releases in, - C(shared_path), the path to the folder to keep shared resources in, - C(unfinished_filename), the file to check for to recognize unfinished builds, - C(previous_release), the release the 'current' symlink is pointing to, - C(previous_release_path), the full path to the 'current' symlink target, - C(new_release), either the 'release' parameter or a generated timestamp, - C(new_release_path), the path to the new release folder (not created by the module)." + - The Deploy Helper manages some of the steps common in deploying software. It creates a folder structure, manages a symlink + for the current release and cleans up old releases. + - Running it with the O(state=query) or O(state=present) returns the C(deploy_helper) fact. 
C(project_path), whatever you + set in the O(path) parameter, C(current_path), the path to the symlink that points to the active release, C(releases_path), + the path to the folder to keep releases in, C(shared_path), the path to the folder to keep shared resources in, C(unfinished_filename), + the file to check for to recognize unfinished builds, C(previous_release), the release the 'current' symlink is pointing + to, C(previous_release_path), the full path to the 'current' symlink target, C(new_release), either the O(release) parameter + or a generated timestamp, C(new_release_path), the path to the new release folder (not created by the module). +attributes: + check_mode: + support: full + diff_mode: + support: none options: path: @@ -37,85 +34,81 @@ options: required: true aliases: ['dest'] description: - - The root path of the project. - Returned in the C(deploy_helper.project_path) fact. - + - The root path of the project. Returned in the C(deploy_helper.project_path) fact. state: type: str description: - The state of the project. - C(query) will only gather facts, - C(present) will create the project I(root) folder, and in it the I(releases) and I(shared) folders, - C(finalize) will remove the unfinished_filename file, create a symlink to the newly - deployed release and optionally clean old releases, - C(clean) will remove failed & old releases, - C(absent) will remove the project folder (synonymous to the M(ansible.builtin.file) module with I(state=absent)). - choices: [ present, finalize, absent, clean, query ] + - V(query) gathers facts. + - V(present) creates the project C(root) folder, and in it the C(releases) and C(shared) folders. + - V(finalize) removes the unfinished_filename file, creates a symlink to the newly deployed release and optionally cleans + old releases. + - V(clean) removes failed & old releases. + - V(absent) removes the project folder (synonymous to the M(ansible.builtin.file) module with O(state=absent)). 
+ choices: [present, finalize, absent, clean, query] default: present release: type: str description: - - The release version that is being deployed. Defaults to a timestamp format %Y%m%d%H%M%S (i.e. '20141119223359'). - This parameter is optional during I(state=present), but needs to be set explicitly for I(state=finalize). - You can use the generated fact I(release={{ deploy_helper.new_release }}). - + - The release version that is being deployed. Defaults to a timestamp format C(%Y%m%d%H%M%S) (for example V(20141119223359)). + This parameter is optional during O(state=present), but needs to be set explicitly for O(state=finalize). You can + use the generated fact C(release={{ deploy_helper.new_release }}). releases_path: type: str description: - - The name of the folder that will hold the releases. This can be relative to I(path) or absolute. - Returned in the C(deploy_helper.releases_path) fact. + - The name of the folder that holds the releases. This can be relative to O(path) or absolute. Returned in the C(deploy_helper.releases_path) + fact. default: releases shared_path: type: path description: - - The name of the folder that will hold the shared resources. This can be relative to I(path) or absolute. - If this is set to an empty string, no shared folder will be created. - Returned in the C(deploy_helper.shared_path) fact. + - The name of the folder that holds the shared resources. This can be relative to O(path) or absolute. If this is set + to an empty string, no shared folder is created. Returned in the C(deploy_helper.shared_path) fact. default: shared current_path: type: path description: - - The name of the symlink that is created when the deploy is finalized. Used in I(finalize) and I(clean). + - The name of the symlink that is created when the deploy is finalized. Used in O(state=finalize) and O(state=clean). Returned in the C(deploy_helper.current_path) fact. 
default: current unfinished_filename: type: str description: - - The name of the file that indicates a deploy has not finished. All folders in the I(releases_path) that - contain this file will be deleted on I(state=finalize) with I(clean=True), or I(state=clean). This file is - automatically deleted from the I(new_release_path) during I(state=finalize). + - The name of the file that indicates a deploy has not finished. All folders in the O(releases_path) that contain this + file are deleted on O(state=finalize) with O(clean=true), or O(state=clean). This file is automatically deleted from + the C(new_release_path) during O(state=finalize). default: DEPLOY_UNFINISHED clean: description: - - Whether to run the clean procedure in case of I(state=finalize). + - Whether to run the clean procedure in case of O(state=finalize). type: bool default: true keep_releases: type: int description: - - The number of old releases to keep when cleaning. Used in I(finalize) and I(clean). Any unfinished builds - will be deleted first, so only correct releases will count. The current version will not count. + - The number of old releases to keep when cleaning. Used in O(state=finalize) and O(state=clean). Any unfinished builds + are deleted first, so only correct releases count. The current version does not count. default: 5 notes: - - Facts are only returned for I(state=query) and I(state=present). If you use both, you should pass any overridden - parameters to both calls, otherwise the second call will overwrite the facts of the first one. - - When using I(state=clean), the releases are ordered by I(creation date). You should be able to switch to a - new naming strategy without problems. - - Because of the default behaviour of generating the I(new_release) fact, this module will not be idempotent - unless you pass your own release name with I(release). Due to the nature of deploying software, this should not - be much of a problem. 
-extends_documentation_fragment: files -''' - -EXAMPLES = ''' + - Facts are only returned for O(state=query) and O(state=present). If you use both, you should pass any overridden parameters + to both calls, otherwise the second call overwrites the facts of the first one. + - When using O(state=clean), the releases are ordered by I(creation date). You should be able to switch to a new naming + strategy without problems. + - Because of the default behaviour of generating the C(new_release) fact, this module is not idempotent unless you pass + your own release name with O(release). Due to the nature of deploying software, this should not be much of a problem. +extends_documentation_fragment: + - ansible.builtin.files + - community.general.attributes +""" +EXAMPLES = r""" # General explanation, starting with an example folder structure for a project: # root: @@ -183,10 +176,10 @@ EXAMPLES = ''' src: '{{ deploy_helper.shared_path }}/{{ item.src }}' state: link with_items: - - path: app/sessions - src: sessions - - path: web/uploads - src: uploads + - path: app/sessions + src: sessions + - path: web/uploads + src: uploads - name: Finalize the deploy, removing the unfinished file and switching the symlink community.general.deploy_helper: path: /path/to/root @@ -268,7 +261,8 @@ EXAMPLES = ''' path: /path/to/root - ansible.builtin.debug: var: deploy_helper -''' +""" + import os import shutil import time diff --git a/plugins/modules/dimensiondata_network.py b/plugins/modules/dimensiondata_network.py index d88b4a9339..80ac17d47d 100644 --- a/plugins/modules/dimensiondata_network.py +++ b/plugins/modules/dimensiondata_network.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (c) 2016 Dimension Data # Authors: @@ -10,21 +9,25 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function 
-__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: dimensiondata_network short_description: Create, update, and delete MCP 1.0 & 2.0 networks extends_documentation_fragment: -- community.general.dimensiondata -- community.general.dimensiondata_wait + - community.general.dimensiondata + - community.general.dimensiondata_wait + - community.general.attributes description: - - Create, update, and delete MCP 1.0 & 2.0 networks + - Create, update, and delete MCP 1.0 & 2.0 networks. author: 'Aimon Bustardo (@aimonb)' +attributes: + check_mode: + support: none + diff_mode: + support: none options: name: description: @@ -49,9 +52,9 @@ options: choices: [present, absent] default: present type: str -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create an MCP 1.0 network community.general.dimensiondata_network: region: na @@ -73,43 +76,43 @@ EXAMPLES = ''' location: NA1 name: mynet state: absent -''' +""" -RETURN = ''' +RETURN = r""" network: - description: Dictionary describing the network. - returned: On success when I(state=present). - type: complex - contains: - id: - description: Network ID. - type: str - sample: "8c787000-a000-4050-a215-280893411a7d" - name: - description: Network name. - type: str - sample: "My network" - description: - description: Network description. - type: str - sample: "My network description" - location: - description: Datacenter location. - type: str - sample: NA3 - status: - description: Network status. (MCP 2.0 only) - type: str - sample: NORMAL - private_net: - description: Private network subnet. (MCP 1.0 only) - type: str - sample: "10.2.3.0" - multicast: - description: Multicast enabled? (MCP 1.0 only) - type: bool - sample: false -''' + description: Dictionary describing the network. + returned: On success when O(state=present). + type: complex + contains: + id: + description: Network ID. 
+ type: str + sample: "8c787000-a000-4050-a215-280893411a7d" + name: + description: Network name. + type: str + sample: "My network" + description: + description: Network description. + type: str + sample: "My network description" + location: + description: Datacenter location. + type: str + sample: NA3 + status: + description: Network status. (MCP 2.0 only). + type: str + sample: NORMAL + private_net: + description: Private network subnet. (MCP 1.0 only). + type: str + sample: "10.2.3.0" + multicast: + description: Multicast enabled? (MCP 1.0 only). + type: bool + sample: false +""" import traceback from ansible.module_utils.basic import AnsibleModule @@ -135,7 +138,7 @@ class DimensionDataNetworkModule(DimensionDataModule): module=AnsibleModule( argument_spec=DimensionDataModule.argument_spec_with_wait( name=dict(type='str', required=True), - description=dict(type='str', required=False), + description=dict(type='str'), service_plan=dict(default='ESSENTIALS', choices=['ADVANCED', 'ESSENTIALS']), state=dict(default='present', choices=['present', 'absent']) ), diff --git a/plugins/modules/dimensiondata_vlan.py b/plugins/modules/dimensiondata_vlan.py index 86db5e5057..8f3de75b25 100644 --- a/plugins/modules/dimensiondata_vlan.py +++ b/plugins/modules/dimensiondata_vlan.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2016 Dimension Data # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later @@ -7,20 +6,24 @@ # Authors: # - Adam Friedman -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: dimensiondata_vlan short_description: Manage a VLAN in a Cloud Control network domain extends_documentation_fragment: -- community.general.dimensiondata -- community.general.dimensiondata_wait + - 
community.general.dimensiondata + - community.general.dimensiondata_wait + - community.general.attributes description: - Manage VLANs in Cloud Control network domains. author: 'Adam Friedman (@tintoy)' +attributes: + check_mode: + support: none + diff_mode: + support: none options: name: description: @@ -34,37 +37,38 @@ options: default: '' network_domain: description: - - The Id or name of the target network domain. + - The ID or name of the target network domain. required: true type: str private_ipv4_base_address: description: - - The base address for the VLAN's IPv4 network (e.g. 192.168.1.0). + - The base address for the VLAN's IPv4 network (for example V(192.168.1.0)). type: str default: '' private_ipv4_prefix_size: description: - - The size of the IPv4 address space, e.g 24. - - Required, if C(private_ipv4_base_address) is specified. + - The size of the IPv4 address space, for example V(24). + - Required, if O(private_ipv4_base_address) is specified. type: int default: 0 state: description: - The desired state for the target VLAN. - - C(readonly) ensures that the state is only ever read, not modified (the module will fail if the resource does not exist). + - V(readonly) ensures that the state is only ever read, not modified (the module fails if the resource does not exist). choices: [present, absent, readonly] default: present type: str allow_expand: description: - - Permit expansion of the target VLAN's network if the module parameters specify a larger network than the VLAN currently possesses. - - If C(False), the module will fail under these conditions. + - Permit expansion of the target VLAN's network if the module parameters specify a larger network than the VLAN currently + possesses. + - If V(false), the module fails under these conditions. - This is intended to prevent accidental expansion of a VLAN's network (since this operation is not reversible). 
type: bool default: false -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Add or update VLAN community.general.dimensiondata_vlan: region: na @@ -94,59 +98,59 @@ EXAMPLES = ''' name: my_vlan_1 state: absent wait: true -''' +""" -RETURN = ''' +RETURN = r""" vlan: - description: Dictionary describing the VLAN. - returned: On success when I(state) is 'present' - type: complex - contains: - id: - description: VLAN ID. - type: str - sample: "aaaaa000-a000-4050-a215-2808934ccccc" - name: - description: VLAN name. - type: str - sample: "My VLAN" - description: - description: VLAN description. - type: str - sample: "My VLAN description" - location: - description: Datacenter location. - type: str - sample: NA3 - private_ipv4_base_address: - description: The base address for the VLAN's private IPV4 network. - type: str - sample: 192.168.23.0 - private_ipv4_prefix_size: - description: The prefix size for the VLAN's private IPV4 network. - type: int - sample: 24 - private_ipv4_gateway_address: - description: The gateway address for the VLAN's private IPV4 network. - type: str - sample: 192.168.23.1 - private_ipv6_base_address: - description: The base address for the VLAN's IPV6 network. - type: str - sample: 2402:9900:111:1195:0:0:0:0 - private_ipv6_prefix_size: - description: The prefix size for the VLAN's IPV6 network. - type: int - sample: 64 - private_ipv6_gateway_address: - description: The gateway address for the VLAN's IPV6 network. - type: str - sample: 2402:9900:111:1195:0:0:0:1 - status: - description: VLAN status. - type: str - sample: NORMAL -''' + description: Dictionary describing the VLAN. + returned: On success when O(state=present) + type: complex + contains: + id: + description: VLAN ID. + type: str + sample: "aaaaa000-a000-4050-a215-2808934ccccc" + name: + description: VLAN name. + type: str + sample: "My VLAN" + description: + description: VLAN description. + type: str + sample: "My VLAN description" + location: + description: Datacenter location. 
+ type: str + sample: NA3 + private_ipv4_base_address: + description: The base address for the VLAN's private IPV4 network. + type: str + sample: 192.168.23.0 + private_ipv4_prefix_size: + description: The prefix size for the VLAN's private IPV4 network. + type: int + sample: 24 + private_ipv4_gateway_address: + description: The gateway address for the VLAN's private IPV4 network. + type: str + sample: 192.168.23.1 + private_ipv6_base_address: + description: The base address for the VLAN's IPV6 network. + type: str + sample: 2402:9900:111:1195:0:0:0:0 + private_ipv6_prefix_size: + description: The prefix size for the VLAN's IPV6 network. + type: int + sample: 64 + private_ipv6_gateway_address: + description: The gateway address for the VLAN's IPV6 network. + type: str + sample: 2402:9900:111:1195:0:0:0:1 + status: + description: VLAN status. + type: str + sample: NORMAL +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.dimensiondata import DimensionDataModule, UnknownNetworkError @@ -180,7 +184,7 @@ class DimensionDataVlanModule(DimensionDataModule): network_domain=dict(required=True, type='str'), private_ipv4_base_address=dict(default='', type='str'), private_ipv4_prefix_size=dict(default=0, type='int'), - allow_expand=dict(required=False, default=False, type='bool'), + allow_expand=dict(default=False, type='bool'), state=dict(default='present', choices=['present', 'absent', 'readonly']) ), required_together=DimensionDataModule.required_together() diff --git a/plugins/modules/discord.py b/plugins/modules/discord.py index 9df00ad7dd..9cb732eb02 100644 --- a/plugins/modules/discord.py +++ b/plugins/modules/discord.py @@ -1,15 +1,12 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2021, Christian Wollinger # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ 
import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: discord short_description: Send Discord messages version_added: 3.1.0 @@ -18,25 +15,32 @@ description: author: Christian Wollinger (@cwollinger) seealso: - name: API documentation - description: Documentation for Discord API + description: Documentation for Discord API. link: https://discord.com/developers/docs/resources/webhook#execute-webhook +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: webhook_id: description: - The webhook ID. - - "Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token})." + - 'Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token}).' required: true type: str webhook_token: description: - The webhook token. - - "Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token})." + - 'Format from Discord webhook URL: C(/webhooks/{webhook.id}/{webhook.token}).' required: true type: str content: description: - Content of the message to the Discord channel. - - At least one of I(content) and I(embeds) must be specified. + - At least one of O(content) and O(embeds) must be specified. type: str username: description: @@ -48,20 +52,20 @@ options: type: str tts: description: - - Set this to C(true) if this is a TTS (Text to Speech) message. + - Set this to V(true) if this is a TTS (Text to Speech) message. type: bool default: false embeds: description: - Send messages as Embeds to the Discord channel. - Embeds can have a colored border, embedded images, text fields and more. - - "Allowed parameters are described in the Discord Docs: U(https://discord.com/developers/docs/resources/channel#embed-object)" - - At least one of I(content) and I(embeds) must be specified. 
+ - 'Allowed parameters are described in the Discord Docs: U(https://discord.com/developers/docs/resources/channel#embed-object).' + - At least one of O(content) and O(embeds) must be specified. type: list elements: dict -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Send a message to the Discord channel community.general.discord: webhook_id: "00000" @@ -112,7 +116,7 @@ EXAMPLES = """ timestamp: "{{ ansible_date_time.iso8601 }}" """ -RETURN = """ +RETURN = r""" http_code: description: - Response Code returned by Discord API. diff --git a/plugins/modules/django_check.py b/plugins/modules/django_check.py new file mode 100644 index 0000000000..f2ee357072 --- /dev/null +++ b/plugins/modules/django_check.py @@ -0,0 +1,114 @@ +#!/usr/bin/python +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: django_check +author: + - Alexei Znamensky (@russoz) +short_description: Wrapper for C(django-admin check) +version_added: 9.1.0 +description: + - This module is a wrapper for the execution of C(django-admin check). +extends_documentation_fragment: + - community.general.attributes + - community.general.django +options: + databases: + description: + - Specify databases to run checks against. + - If not specified, Django does not run database tests. + - The parameter has been renamed to O(databases) in community.general 11.3.0. The old name is still available as an alias. + type: list + elements: str + aliases: ["database"] + deploy: + description: + - Include additional checks relevant in a deployment setting. + type: bool + default: false + fail_level: + description: + - Message level that triggers failure. + - Default is the Django default value. Check the documentation for the version being used. 
+ type: str + choices: [CRITICAL, ERROR, WARNING, INFO, DEBUG] + tags: + description: + - Restrict checks to specific tags. + type: list + elements: str + apps: + description: + - Restrict checks to specific applications. + - Default is to check all applications. + type: list + elements: str +notes: + - The outcome of the module is found in the common return values RV(ignore:stdout), RV(ignore:stderr), RV(ignore:rc). + - The module fails if RV(ignore:rc) is not zero. +attributes: + check_mode: + support: full + diff_mode: + support: none +""" + +EXAMPLES = r""" +- name: Check the entire project + community.general.django_check: + settings: myproject.settings + +- name: Create the project using specific databases + community.general.django_check: + database: + - somedb + - myotherdb + settings: fancysite.settings + pythonpath: /home/joedoe/project/fancysite + venv: /home/joedoe/project/fancysite/venv +""" + +RETURN = r""" +run_info: + description: Command-line execution information. + type: dict + returned: success and C(verbosity) >= 3 +version: + description: Version of Django. 
+ type: str + returned: always + sample: 5.1.2 + version_added: 10.0.0 +""" + +from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper + + +class DjangoCheck(DjangoModuleHelper): + module = dict( + argument_spec=dict( + databases=dict(type="list", elements="str", aliases=["database"]), + deploy=dict(type="bool", default=False), + fail_level=dict(type="str", choices=["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"]), + tags=dict(type="list", elements="str"), + apps=dict(type="list", elements="str"), + ), + supports_check_mode=True, + ) + django_admin_cmd = "check" + django_admin_arg_order = "database_stacked_dash deploy fail_level tags apps" + + def __init_module__(self): + self.vars.set("database_stacked_dash", self.vars.databases, output=False) + + +def main(): + DjangoCheck.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/django_command.py b/plugins/modules/django_command.py new file mode 100644 index 0000000000..a6c3f409e5 --- /dev/null +++ b/plugins/modules/django_command.py @@ -0,0 +1,92 @@ +#!/usr/bin/python +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: django_command +author: + - Alexei Znamensky (@russoz) +short_description: Run Django admin commands +version_added: 9.0.0 +description: + - This module allows the execution of arbitrary Django admin commands. +extends_documentation_fragment: + - community.general.attributes + - community.general.django +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + command: + description: + - Django admin command. It must be a valid command accepted by C(python -m django) at the target system. 
+ type: str + required: true + extra_args: + type: list + elements: str + description: + - List of extra arguments passed to the django admin command. +""" + +EXAMPLES = r""" +- name: Check the project + community.general.django_command: + command: check + settings: myproject.settings + +- name: Check the project in specified python path, using virtual environment + community.general.django_command: + command: check + settings: fancysite.settings + pythonpath: /home/joedoe/project/fancysite + venv: /home/joedoe/project/fancysite/venv +""" + +RETURN = r""" +run_info: + description: Command-line execution information. + type: dict + returned: success and O(verbosity) >= 3 +version: + description: Version of Django. + type: str + returned: always + sample: 5.1.2 + version_added: 10.0.0 +""" + +import shlex + +from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper +from ansible_collections.community.general.plugins.module_utils.cmd_runner import cmd_runner_fmt + + +class DjangoCommand(DjangoModuleHelper): + module = dict( + argument_spec=dict( + command=dict(type="str", required=True), + extra_args=dict(type="list", elements="str"), + ), + supports_check_mode=False, + ) + arg_formats = dict( + extra_args=cmd_runner_fmt.as_list(), + ) + django_admin_arg_order = "extra_args" + + def __init_module__(self): + self.vars.command = shlex.split(self.vars.command) + + +def main(): + DjangoCommand.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/django_createcachetable.py b/plugins/modules/django_createcachetable.py new file mode 100644 index 0000000000..76a31ab0b1 --- /dev/null +++ b/plugins/modules/django_createcachetable.py @@ -0,0 +1,74 @@ +#!/usr/bin/python +# Copyright (c) 2024, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + 
+DOCUMENTATION = r""" +module: django_createcachetable +author: + - Alexei Znamensky (@russoz) +short_description: Wrapper for C(django-admin createcachetable) +version_added: 9.1.0 +description: + - This module is a wrapper for the execution of C(django-admin createcachetable). +extends_documentation_fragment: + - community.general.attributes + - community.general.django + - community.general.django.database +attributes: + check_mode: + support: full + diff_mode: + support: none +""" + +EXAMPLES = r""" +- name: Create cache table in the default database + community.general.django_createcachetable: + settings: myproject.settings + +- name: Create cache table in the other database + community.general.django_createcachetable: + database: myotherdb + settings: fancysite.settings + pythonpath: /home/joedoe/project/fancysite + venv: /home/joedoe/project/fancysite/venv +""" + +RETURN = r""" +run_info: + description: Command-line execution information. + type: dict + returned: success and O(verbosity) >= 3 +version: + description: Version of Django. 
+ type: str + returned: always + sample: 5.1.2 + version_added: 10.0.0 +""" + +from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper + + +class DjangoCreateCacheTable(DjangoModuleHelper): + module = dict( + supports_check_mode=True, + ) + django_admin_cmd = "createcachetable" + django_admin_arg_order = "noinput database_dash dry_run" + _django_args = ["database_dash"] + _check_mode_arg = "dry_run" + + def __init_module__(self): + self.vars.set("database_dash", self.vars.database, output=False) + + +def main(): + DjangoCreateCacheTable.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/django_dumpdata.py b/plugins/modules/django_dumpdata.py new file mode 100644 index 0000000000..5c819b2755 --- /dev/null +++ b/plugins/modules/django_dumpdata.py @@ -0,0 +1,124 @@ +#!/usr/bin/python +# Copyright (c) 2025, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: django_dumpdata +author: + - Alexei Znamensky (@russoz) +short_description: Wrapper for C(django-admin dumpdata) +version_added: 11.3.0 +description: + - This module is a wrapper for the execution of C(django-admin dumpdata). +extends_documentation_fragment: + - community.general.attributes + - community.general.django + - community.general.django.database + - community.general.django.data +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + all: + description: Dump all records, including those which might otherwise be filtered or modified by a custom manager. + type: bool + indent: + description: + - Indentation size for the output. + - Default is not to indent, so the output is generated in one single line. + type: int + natural_foreign: + description: Use natural keys when serializing for foreign keys. 
+ type: bool + natural_primary: + description: Omit primary keys when serializing. + type: bool + primary_keys: + description: + - List of primary keys to include in the dump. + - Only available when dumping one single model. + type: list + elements: str + aliases: ["pks"] + fixture: + description: + - Path to the output file. + - The fixture filename may end with V(.bz2), V(.gz), V(.lzma) or V(.xz), in which case the corresponding + compression format will be used. + - This corresponds to the C(--output) parameter for the C(django-admin dumpdata) command. + type: path + aliases: [output] + required: true + apps_models: + description: + - Dump only the applications and models listed in the dump. + - Format must be either V(app_label) or V(app_label.ModelName). + - If not passed, all applications and models are to be dumped. + type: list + elements: str +""" + +EXAMPLES = r""" +- name: Dump all data + community.general.django_dumpdata: + settings: myproject.settings + fixture: /tmp/mydata.json + +- name: Dump data excluding certain apps, into a compressed JSON file + community.general.django_dumpdata: + settings: myproject.settings + database: myotherdb + excludes: + - auth + - contenttypes + fixture: /tmp/mydata.json.gz +""" + +RETURN = r""" +run_info: + description: Command-line execution information. + type: dict + returned: success and O(verbosity) >= 3 +version: + description: Version of Django. 
+ type: str + returned: always + sample: 5.1.2 +""" + +from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper + + +class DjangoDumpData(DjangoModuleHelper): + module = dict( + argument_spec=dict( + all=dict(type="bool"), + indent=dict(type="int"), + natural_foreign=dict(type="bool"), + natural_primary=dict(type="bool"), + primary_keys=dict(type="list", elements="str", aliases=["pks"], no_log=False), + # the underlying vardict does not allow the name "output" + fixture=dict(type="path", required=True, aliases=["output"]), + apps_models=dict(type="list", elements="str"), + ), + supports_check_mode=False, + ) + django_admin_cmd = "dumpdata" + django_admin_arg_order = "all format indent excludes database_dash natural_foreign natural_primary primary_keys fixture apps_models" + _django_args = ["data", "database_dash"] + + def __init_module__(self): + self.vars.set("database_dash", self.vars.database, output=False) + + +def main(): + DjangoDumpData.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/django_loaddata.py b/plugins/modules/django_loaddata.py new file mode 100644 index 0000000000..75b388de9a --- /dev/null +++ b/plugins/modules/django_loaddata.py @@ -0,0 +1,90 @@ +#!/usr/bin/python +# Copyright (c) 2025, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: django_loaddata +author: + - Alexei Znamensky (@russoz) +short_description: Wrapper for C(django-admin loaddata) +version_added: 11.3.0 +description: + - This module is a wrapper for the execution of C(django-admin loaddata). 
+extends_documentation_fragment: + - community.general.attributes + - community.general.django + - community.general.django.database + - community.general.django.data +attributes: + check_mode: + support: none + diff_mode: + support: none +options: + app: + description: Specifies a single app to look for fixtures in rather than looking in all apps. + type: str + ignore_non_existent: + description: Ignores fields and models that may have been removed since the fixture was originally generated. + type: bool + fixtures: + description: + - List of paths to the fixture files. + type: list + elements: path +""" + +EXAMPLES = r""" +- name: Load all available fixtures + community.general.django_loaddata: + settings: myproject.settings + +- name: Load fixtures into the other database + community.general.django_loaddata: + database: myotherdb + settings: fancysite.settings + pythonpath: /home/joedoe/project/fancysite + venv: /home/joedoe/project/fancysite/venv +""" + +RETURN = r""" +run_info: + description: Command-line execution information. + type: dict + returned: success and O(verbosity) >= 3 +version: + description: Version of Django. 
+ type: str + returned: always + sample: 5.1.2 +""" + +from ansible_collections.community.general.plugins.module_utils.django import DjangoModuleHelper + + +class DjangoLoadData(DjangoModuleHelper): + module = dict( + argument_spec=dict( + app=dict(type="str"), + ignore_non_existent=dict(type="bool"), + fixtures=dict(type="list", elements="path"), + ), + supports_check_mode=False, + ) + django_admin_cmd = "loaddata" + django_admin_arg_order = "database_dash ignore_non_existent app format excludes fixtures" + _django_args = ["data", "database_dash"] + + def __init_module__(self): + self.vars.set("database_dash", self.vars.database, output=False) + + +def main(): + DjangoLoadData.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/django_manage.py b/plugins/modules/django_manage.py index 188ff2d3df..ddda99849e 100644 --- a/plugins/modules/django_manage.py +++ b/plugins/modules/django_manage.py @@ -1,43 +1,41 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2022, Alexei Znamensky # Copyright (c) 2013, Scott Anderson # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: django_manage short_description: Manages a Django application description: - - Manages a Django application using the C(manage.py) application frontend to C(django-admin). With the - I(virtualenv) parameter, all management commands will be executed by the given C(virtualenv) installation. + - Manages a Django application using the C(manage.py) application frontend to C(django-admin). With the O(virtualenv) parameter, + all management commands are executed by the given C(virtualenv) installation. 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: command: description: - - The name of the Django management command to run. The commands listed below are built in this module and have some basic parameter validation. - - > - C(cleanup) - clean up old data from the database (deprecated in Django 1.5). This parameter will be - removed in community.general 9.0.0. Use C(clearsessions) instead. - - C(collectstatic) - Collects the static files into C(STATIC_ROOT). - - C(createcachetable) - Creates the cache tables for use with the database cache backend. - - C(flush) - Removes all data from the database. - - C(loaddata) - Searches for and loads the contents of the named I(fixtures) into the database. - - C(migrate) - Synchronizes the database state with models and migrations. - - > - C(syncdb) - Synchronizes the database state with models and migrations (deprecated in Django 1.7). - This parameter will be removed in community.general 9.0.0. Use C(migrate) instead. - - C(test) - Runs tests for all installed apps. - - > - C(validate) - Validates all installed models (deprecated in Django 1.7). This parameter will be - removed in community.general 9.0.0. Use C(check) instead. - - Other commands can be entered, but will fail if they are unknown to Django. Other commands that may - prompt for user input should be run with the C(--noinput) flag. + - The name of the Django management command to run. The commands listed below are built in this module and have some + basic parameter validation. + - V(collectstatic) - Collects the static files into C(STATIC_ROOT). + - V(createcachetable) - Creates the cache tables for use with the database cache backend. + - V(flush) - Removes all data from the database. + - V(loaddata) - Searches for and loads the contents of the named O(fixtures) into the database. + - V(migrate) - Synchronizes the database state with models and migrations. 
+ - V(test) - Runs tests for all installed apps. + - Custom commands can be entered, but they fail unless they are known to Django. Custom commands that may prompt for + user input should be run with the C(--noinput) flag. + - Support for the values V(cleanup), V(syncdb), V(validate) was removed in community.general 9.0.0. See note about supported + versions of Django. type: str required: true project_path: @@ -48,72 +46,71 @@ options: aliases: [app_path, chdir] settings: description: - - The Python path to the application's settings module, such as C(myapp.settings). + - The Python path to the application's settings module, such as V(myapp.settings). type: path required: false pythonpath: description: - - A directory to add to the Python path. Typically used to include the settings module if it is located - external to the application directory. - - This would be equivalent to adding I(pythonpath)'s value to the C(PYTHONPATH) environment variable. + - A directory to add to the Python path. Typically used to include the settings module if it is located external to + the application directory. + - This would be equivalent to adding O(pythonpath)'s value to the E(PYTHONPATH) environment variable. type: path required: false aliases: [python_path] virtualenv: description: - An optional path to a C(virtualenv) installation to use while running the manage application. + - The virtual environment must exist, otherwise the module fails. type: path aliases: [virtual_env] apps: description: - - A list of space-delimited apps to target. Used by the C(test) command. + - A list of space-delimited apps to target. Used by the V(test) command. type: str required: false cache_table: description: - - The name of the table used for database-backed caching. Used by the C(createcachetable) command. + - The name of the table used for database-backed caching. Used by the V(createcachetable) command. 
type: str required: false clear: description: - Clear the existing files before trying to copy or link the original file. - - Used only with the C(collectstatic) command. The C(--noinput) argument will be added automatically. + - Used only with the V(collectstatic) command. The C(--noinput) argument is added automatically. required: false default: false type: bool database: description: - - The database to target. Used by the C(createcachetable), C(flush), C(loaddata), C(syncdb), - and C(migrate) commands. + - The database to target. Used by the V(createcachetable), V(flush), V(loaddata), V(syncdb), and V(migrate) commands. type: str required: false failfast: description: - - Fail the command immediately if a test fails. Used by the C(test) command. + - Fail the command immediately if a test fails. Used by the V(test) command. required: false default: false type: bool aliases: [fail_fast] fixtures: description: - - A space-delimited list of fixture file names to load in the database. B(Required) by the C(loaddata) command. + - A space-delimited list of fixture file names to load in the database. B(Required) by the V(loaddata) command. type: str required: false skip: description: - - Will skip over out-of-order missing migrations, you can only use this parameter with C(migrate) command. + - Skips over out-of-order missing migrations, you can only use this parameter with V(migrate) command. required: false type: bool merge: description: - - Will run out-of-order or missing migrations as they are not rollback migrations, you can only use this - parameter with C(migrate) command. + - Runs out-of-order or missing migrations as they are not rollback migrations, you can only use this parameter with + V(migrate) command. required: false type: bool link: description: - - Will create links to the files instead of copying them, you can only use this parameter with - C(collectstatic) command. 
+ - Creates links to the files instead of copying them, you can only use this parameter with V(collectstatic) command. required: false type: bool testrunner: @@ -123,35 +120,19 @@ options: type: str required: false aliases: [test_runner] - ack_venv_creation_deprecation: - description: - - >- - When a I(virtualenv) is set but the virtual environment does not exist, the current behavior is - to create a new virtual environment. That behavior is deprecated and if that case happens it will - generate a deprecation warning. Set this flag to C(true) to suppress the deprecation warning. - - Please note that you will receive no further warning about this being removed until the module - will start failing in such cases from community.general 9.0.0 on. - type: bool - version_added: 5.8.0 notes: - - > - B(ATTENTION - DEPRECATION): Support for Django releases older than 4.1 will be removed in - community.general version 9.0.0 (estimated to be released in May 2024). - Please notice that Django 4.1 requires Python 3.8 or greater. - - C(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the I(virtualenv) parameter - is specified. This requirement is deprecated and will be removed in community.general version 9.0.0. - - This module will create a virtualenv if the I(virtualenv) parameter is specified and a virtual environment does not already - exist at the given location. This behavior is deprecated and will be removed in community.general version 9.0.0. - - The parameter I(virtualenv) will remain in use, but it will require the specified virtualenv to exist. - The recommended way to create one in Ansible is by using M(ansible.builtin.pip). - - This module assumes English error messages for the C(createcachetable) command to detect table existence, - unfortunately. - - To be able to use the C(migrate) command with django versions < 1.7, you must have C(south) installed and added - as an app in your settings. 
- - To be able to use the C(collectstatic) command, you must have enabled staticfiles in your settings. - - Your C(manage.py) application must be executable (rwxr-xr-x), and must have a valid shebang, - i.e. C(#!/usr/bin/env python), for invoking the appropriate Python interpreter. + - 'B(ATTENTION): Support for Django releases older than 4.1 has been removed in community.general version 9.0.0. While the + module allows for free-form commands, not verifying the version of Django being used, it is B(strongly recommended) to + use a more recent version of the framework.' + - Please notice that Django 4.1 requires Python 3.8 or greater. + - This module does not create a virtualenv if the O(virtualenv) parameter is specified and a virtual environment does not + already exist at the given location. This behavior changed in community.general version 9.0.0. + - The recommended way to create a virtual environment in Ansible is by using M(ansible.builtin.pip). + - This module assumes English error messages for the V(createcachetable) command to detect table existence, unfortunately. + - To be able to use the V(collectstatic) command, you must have enabled C(staticfiles) in your settings. + - Your C(manage.py) application must be executable (C(rwxr-xr-x)), and must have a valid shebang, for example C(#!/usr/bin/env + python), for invoking the appropriate Python interpreter. seealso: - name: django-admin and manage.py Reference description: Reference for C(django-admin) or C(manage.py) commands. @@ -162,16 +143,16 @@ seealso: - name: What Python version can I use with Django? description: From the Django FAQ, the response to Python requirements for the framework. 
link: https://docs.djangoproject.com/en/dev/faq/install/#what-python-version-can-i-use-with-django -requirements: [ "virtualenv", "django" ] +requirements: ["django >= 4.1"] author: - Alexei Znamensky (@russoz) - Scott Anderson (@tastychutney) -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Run cleanup on the application installed in django_dir community.general.django_manage: - command: cleanup + command: clearsessions project_path: "{{ django_dir }}" - name: Load the initial_data fixture into the application @@ -182,7 +163,7 @@ EXAMPLES = """ - name: Run syncdb on the application community.general.django_manage: - command: syncdb + command: migrate project_path: "{{ django_dir }}" settings: "{{ settings_app_name }}" pythonpath: "{{ settings_dir }}" @@ -226,22 +207,7 @@ def _ensure_virtualenv(module): activate = os.path.join(vbin, 'activate') if not os.path.exists(activate): - # In version 9.0.0, if the venv is not found, it should fail_json() here. - if not module.params['ack_venv_creation_deprecation']: - module.deprecate( - 'The behavior of "creating the virtual environment when missing" is being ' - 'deprecated and will be removed in community.general version 9.0.0. 
' - 'Set the module parameter `ack_venv_creation_deprecation: true` to ' - 'prevent this message from showing up when creating a virtualenv.', - version='9.0.0', - collection_name='community.general', - ) - - virtualenv = module.get_bin_path('virtualenv', True) - vcmd = [virtualenv, venv_param] - rc, out_venv, err_venv = module.run_command(vcmd) - if rc != 0: - _fail(module, vcmd, out_venv, err_venv) + module.fail_json(msg='%s does not point to a valid virtual environment' % venv_param) os.environ["PATH"] = "%s:%s" % (vbin, os.environ["PATH"]) os.environ["VIRTUAL_ENV"] = venv_param @@ -259,11 +225,6 @@ def loaddata_filter_output(line): return "Installed" in line and "Installed 0 object" not in line -def syncdb_filter_output(line): - return ("Creating table " in line) \ - or ("Installed" in line and "Installed 0 object" not in line) - - def migrate_filter_output(line): return ("Migrating forwards " in line) \ or ("Installed" in line and "Installed 0 object" not in line) \ @@ -276,13 +237,10 @@ def collectstatic_filter_output(line): def main(): command_allowed_param_map = dict( - cleanup=(), createcachetable=('cache_table', 'database', ), flush=('database', ), loaddata=('database', 'fixtures', ), - syncdb=('database', ), test=('failfast', 'testrunner', 'apps', ), - validate=(), migrate=('apps', 'skip', 'merge', 'database',), collectstatic=('clear', 'link', ), ) @@ -294,7 +252,6 @@ def main(): # forces --noinput on every command that needs it noinput_commands = ( 'flush', - 'syncdb', 'migrate', 'test', 'collectstatic', @@ -326,7 +283,6 @@ def main(): skip=dict(type='bool'), merge=dict(type='bool'), link=dict(type='bool'), - ack_venv_creation_deprecation=dict(type='bool'), ), ) @@ -335,21 +291,6 @@ def main(): project_path = module.params['project_path'] virtualenv = module.params['virtualenv'] - try: - _deprecation = dict( - cleanup="clearsessions", - syncdb="migrate", - validate="check", - ) - module.deprecate( - 'The command {0} has been deprecated as it is no 
longer supported in recent Django versions.' - 'Please use the command {1} instead that provide similar capability.'.format(command_bin, _deprecation[command_bin]), - version='9.0.0', - collection_name='community.general' - ) - except KeyError: - pass - for param in specific_params: value = module.params[param] if value and param not in command_allowed_param_map[command_bin]: diff --git a/plugins/modules/dnf_config_manager.py b/plugins/modules/dnf_config_manager.py new file mode 100644 index 0000000000..847e912115 --- /dev/null +++ b/plugins/modules/dnf_config_manager.py @@ -0,0 +1,225 @@ +#!/usr/bin/python + +# Copyright (c) 2023, Andrew Hyatt +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: dnf_config_manager +short_description: Enable or disable dnf repositories using config-manager +version_added: 8.2.0 +description: + - This module enables or disables repositories using the C(dnf config-manager) sub-command. +author: Andrew Hyatt (@ahyattdev) +requirements: + - dnf + - dnf-plugins-core +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - Repository ID, for example V(crb). + default: [] + required: false + type: list + elements: str + state: + description: + - Whether the repositories should be V(enabled) or V(disabled). + default: enabled + required: false + type: str + choices: [enabled, disabled] +notes: + - Does not work with C(dnf5). 
+seealso: + - module: ansible.builtin.dnf + - module: ansible.builtin.yum_repository +""" + +EXAMPLES = r""" +- name: Ensure the crb repository is enabled + community.general.dnf_config_manager: + name: crb + state: enabled + +- name: Ensure the appstream and zfs repositories are disabled + community.general.dnf_config_manager: + name: + - appstream + - zfs + state: disabled +""" + +RETURN = r""" +repo_states_pre: + description: Repo IDs before action taken. + returned: success + type: dict + contains: + enabled: + description: Enabled repository IDs. + returned: success + type: list + elements: str + disabled: + description: Disabled repository IDs. + returned: success + type: list + elements: str + sample: + enabled: + - appstream + - baseos + - crb + disabled: + - appstream-debuginfo + - appstream-source + - baseos-debuginfo + - baseos-source + - crb-debug + - crb-source +repo_states_post: + description: Repository states after action taken. + returned: success + type: dict + contains: + enabled: + description: Enabled repository IDs. + returned: success + type: list + elements: str + disabled: + description: Disabled repository IDs. + returned: success + type: list + elements: str + sample: + enabled: + - appstream + - baseos + - crb + disabled: + - appstream-debuginfo + - appstream-source + - baseos-debuginfo + - baseos-source + - crb-debug + - crb-source +changed_repos: + description: Repositories changed. 
+ returned: success + type: list + elements: str + sample: ["crb"] +""" + +from ansible.module_utils.basic import AnsibleModule +import os +import re + +DNF_BIN = "/usr/bin/dnf" +REPO_ID_RE = re.compile(r'^Repo-id\s*:\s*(\S+)$') +REPO_STATUS_RE = re.compile(r'^Repo-status\s*:\s*(disabled|enabled)$') + + +def get_repo_states(module): + rc, out, err = module.run_command([DNF_BIN, 'repolist', '--all', '--verbose'], check_rc=True) + + repos = dict() + last_repo = '' + for i, line in enumerate(out.split('\n')): + m = REPO_ID_RE.match(line) + if m: + if len(last_repo) > 0: + module.fail_json(msg='dnf repolist parse failure: parsed another repo id before next status') + last_repo = m.group(1) + continue + m = REPO_STATUS_RE.match(line) + if m: + if len(last_repo) == 0: + module.fail_json(msg='dnf repolist parse failure: parsed status before repo id') + repos[last_repo] = m.group(1) + last_repo = '' + return repos + + +def set_repo_states(module, repo_ids, state): + module.run_command([DNF_BIN, 'config-manager', '--assumeyes', '--set-{0}'.format(state)] + repo_ids, check_rc=True) + + +def pack_repo_states_for_return(states): + enabled = [] + disabled = [] + for repo_id in states: + if states[repo_id] == 'enabled': + enabled.append(repo_id) + else: + disabled.append(repo_id) + + # Sort for consistent results + enabled.sort() + disabled.sort() + + return {'enabled': enabled, 'disabled': disabled} + + +def main(): + module_args = dict( + name=dict(type='list', elements='str', default=[]), + state=dict(type='str', choices=['enabled', 'disabled'], default='enabled') + ) + + result = dict( + changed=False + ) + + module = AnsibleModule( + argument_spec=module_args, + supports_check_mode=True + ) + module.run_command_environ_update = dict(LANGUAGE='C', LC_ALL='C') + + if not os.path.exists(DNF_BIN): + module.fail_json(msg="%s was not found" % DNF_BIN) + + repo_states = get_repo_states(module) + result['repo_states_pre'] = pack_repo_states_for_return(repo_states) + + 
desired_repo_state = module.params['state'] + names = module.params['name'] + + to_change = [] + for repo_id in names: + if repo_id not in repo_states: + module.fail_json(msg="did not find repo with ID '{0}' in dnf repolist --all --verbose".format(repo_id)) + if repo_states[repo_id] != desired_repo_state: + to_change.append(repo_id) + result['changed'] = len(to_change) > 0 + result['changed_repos'] = to_change + + if module.check_mode: + module.exit_json(**result) + + if len(to_change) > 0: + set_repo_states(module, to_change, desired_repo_state) + + repo_states_post = get_repo_states(module) + result['repo_states_post'] = pack_repo_states_for_return(repo_states_post) + + for repo_id in to_change: + if repo_states_post[repo_id] != desired_repo_state: + module.fail_json(msg="dnf config-manager failed to make '{0}' {1}".format(repo_id, desired_repo_state)) + + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/dnf_versionlock.py b/plugins/modules/dnf_versionlock.py index a0a440b620..e6fa546107 100644 --- a/plugins/modules/dnf_versionlock.py +++ b/plugins/modules/dnf_versionlock.py @@ -1,80 +1,73 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2021, Roberto Moreda # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: dnf_versionlock version_added: '4.0.0' short_description: Locks package versions in C(dnf) based systems description: -- Locks package versions using the C(versionlock) plugin in C(dnf) based - systems. This plugin takes a set of name and versions for packages and - excludes all other versions of those packages. This allows you to for example - protect packages from being updated by newer versions. 
The state of the - plugin that reflects locking of packages is the C(locklist). + - Locks package versions using the C(versionlock) plugin in C(dnf) based systems. This plugin takes a set of name and versions + for packages and excludes all other versions of those packages. This allows you to for example protect packages from being + updated by newer versions. The state of the plugin that reflects locking of packages is the C(locklist). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: partial + details: + - The logics of the C(versionlock) plugin for corner cases could be confusing, so please take in account that this module + does its best to give a C(check_mode) prediction on what is going to happen. In case of doubt, check the documentation + of the plugin. + - Sometimes the module could predict changes in C(check_mode) that are not fulfilled because C(versionlock) concludes + that there is already a entry in C(locklist) that already matches. + diff_mode: + support: none options: name: description: - - Package name spec to add or exclude to or delete from the C(locklist) - using the format expected by the C(dnf repoquery) command. - - This parameter is mutually exclusive with I(state=clean). + - Package name spec to add or exclude to or delete from the C(locklist) using the format expected by the C(dnf repoquery) + command. + - This parameter is mutually exclusive with O(state=clean). type: list required: false elements: str default: [] raw: description: - - Do not resolve package name specs to NEVRAs to find specific version - to lock to. Instead the package name specs are used as they are. This - enables locking to not yet available versions of the package. + - Do not resolve package name specs to NEVRAs to find specific version to lock to. Instead the package name specs are + used as they are. This enables locking to not yet available versions of the package. 
type: bool default: false state: description: - - Whether to add (C(present) or C(excluded)) to or remove (C(absent) or - C(clean)) from the C(locklist). - - C(present) will add a package name spec to the C(locklist). If there is a - installed package that matches, then only that version will be added. - Otherwise, all available package versions will be added. - - C(excluded) will add a package name spec as excluded to the - C(locklist). It means that packages represented by the package name - spec will be excluded from transaction operations. All available - package versions will be added. - - C(absent) will delete entries in the C(locklist) that match the - package name spec. - - C(clean) will delete all entries in the C(locklist). This option is - mutually exclusive with C(name). - choices: [ 'absent', 'clean', 'excluded', 'present' ] + - Whether to add (V(present) or V(excluded)) to or remove (V(absent) or V(clean)) from the C(locklist). + - V(present) adds a package name spec to the C(locklist). If there is a installed package that matches, then only that + version is added. Otherwise, all available package versions are added. + - V(excluded) adds a package name spec as excluded to the C(locklist). It means that packages represented by the package + name spec are excluded from transaction operations. All available package versions are added. + - V(absent) deletes entries in the C(locklist) that match the package name spec. + - V(clean) deletes all entries in the C(locklist). This option is mutually exclusive with O(name). + choices: ['absent', 'clean', 'excluded', 'present'] type: str default: present notes: - - The logics of the C(versionlock) plugin for corner cases could be - confusing, so please take in account that this module will do its best to - give a C(check_mode) prediction on what is going to happen. In case of - doubt, check the documentation of the plugin. 
- - Sometimes the module could predict changes in C(check_mode) that will not - be such because C(versionlock) concludes that there is already a entry in - C(locklist) that already matches. - - In an ideal world, the C(versionlock) plugin would have a dry-run option to - know for sure what is going to happen. So far we have to work with a best - guess as close as possible to the behaviour inferred from its code. - - For most of cases where you want to lock and unlock specific versions of a - package, this works fairly well. - - Supports C(check_mode). + - In an ideal world, the C(versionlock) plugin would have a dry-run option to know for sure what is going to happen. So + far we have to work with a best guess as close as possible to the behaviour inferred from its code. + - For most of cases where you want to lock and unlock specific versions of a package, this works fairly well. + - Does not work with C(dnf5). + - This module requires Python 3.6 or greater to run, which should not be a problem for most systems that use C(dnf). requirements: - dnf - dnf-plugin-versionlock author: - Roberto Moreda (@moreda) -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Prevent installed nginx from being updated community.general.dnf_versionlock: name: nginx @@ -89,12 +82,12 @@ EXAMPLES = r''' - name: Remove lock from nginx to be updated again community.general.dnf_versionlock: - package: nginx + name: nginx state: absent - name: Exclude bind 32:9.11 from installs or updates community.general.dnf_versionlock: - package: bind-32:9.11* + name: bind-32:9.11* state: excluded - name: Keep bash package in major version 4 @@ -106,34 +99,34 @@ EXAMPLES = r''' - name: Delete all entries in the locklist of versionlock community.general.dnf_versionlock: state: clean -''' +""" -RETURN = r''' +RETURN = r""" locklist_pre: - description: Locklist before module execution. 
- returned: success - type: list - elements: str - sample: [ 'bash-0:4.4.20-1.el8_4.*', '!bind-32:9.11.26-4.el8_4.*' ] + description: Locklist before module execution. + returned: success + type: list + elements: str + sample: ["bash-0:4.4.20-1.el8_4.*", "!bind-32:9.11.26-4.el8_4.*"] locklist_post: - description: Locklist after module execution. - returned: success and (not check mode or state is clean) - type: list - elements: str - sample: [ 'bash-0:4.4.20-1.el8_4.*' ] + description: Locklist after module execution. + returned: success and (not check mode or state is clean) + type: list + elements: str + sample: ["bash-0:4.4.20-1.el8_4.*"] specs_toadd: - description: Package name specs meant to be added by versionlock. - returned: success - type: list - elements: str - sample: [ 'bash' ] + description: Package name specs meant to be added by versionlock. + returned: success + type: list + elements: str + sample: ["bash"] specs_todelete: - description: Package name specs meant to be deleted by versionlock. - returned: success - type: list - elements: str - sample: [ 'bind' ] -''' + description: Package name specs meant to be deleted by versionlock. + returned: success + type: list + elements: str + sample: ["bind"] +""" from ansible.module_utils.basic import AnsibleModule import fnmatch @@ -143,8 +136,7 @@ import re DNF_BIN = "/usr/bin/dnf" VERSIONLOCK_CONF = "/etc/dnf/plugins/versionlock.conf" # NEVRA regex. 
-NEVRA_RE = re.compile(r"^(?P<name>.+)-(?P<epoch>\d+):(?P<version>.+)-" - r"(?P<release>.+)\.(?P<arch>.+)$") +NEVRA_RE = re.compile(r"^(?P<name>.+)-(?P<epoch>\d+):(?P<version>.+)-(?P<release>.+)\.(?P<arch>.+)$") def do_versionlock(module, command, patterns=None, raw=False): @@ -185,6 +177,7 @@ def match(entry, pattern): m = NEVRA_RE.match(entry) if not m: return False + # indexing a match object with [] is a Python 3.6+ construct for name in ( '%s' % m["name"], '%s.%s' % (m["name"], m["arch"]), @@ -228,6 +221,43 @@ def get_packages(module, patterns, only_installed=False): return packages_available_map_name_evrs +def get_package_mgr(): + for bin_path in (DNF_BIN,): + if os.path.exists(bin_path): + return "dnf5" if os.path.realpath(bin_path) == "/usr/bin/dnf5" else "dnf" + # fallback to dnf + return "dnf" + + +def get_package_list(module, package_mgr="dnf"): + if package_mgr == "dnf": + return do_versionlock(module, "list").split() + + package_list = [] + if package_mgr == "dnf5": + stanza_start = False + package_name = None + for line in do_versionlock(module, "list").splitlines(): + if line.startswith(("#", " ")): + continue + if line.startswith("Package name:"): + stanza_start = True + dummy, name = line.split(":", 1) + name = name.strip() + pkg_name = get_packages(module, patterns=[name]) + package_name = "%s-%s.*" % (name, pkg_name[name].pop()) + if package_name and package_name not in package_list: + package_list.append(package_name) + if line.startswith("evr"): + dummy, package_version = line.split("=", 1) + package_version = package_version.strip() + if stanza_start: + if package_name and package_name not in package_list: + package_list.append(package_name) + stanza_start = False + return package_list + + def main(): module = AnsibleModule( argument_spec=dict( @@ -246,9 +276,10 @@ def main(): msg = "" # Check module pre-requisites. 
- if not os.path.exists(DNF_BIN): - module.fail_json(msg="%s was not found" % DNF_BIN) - if not os.path.exists(VERSIONLOCK_CONF): + global DNF_BIN + DNF_BIN = module.get_bin_path('dnf', True) + package_mgr = get_package_mgr() + if package_mgr == "dnf" and not os.path.exists(VERSIONLOCK_CONF): module.fail_json(msg="plugin versionlock is required") # Check incompatible options. @@ -257,7 +288,7 @@ def main(): if state != "clean" and not patterns: module.fail_json(msg="name list is required for %s state" % state) - locklist_pre = do_versionlock(module, "list").split() + locklist_pre = get_package_list(module, package_mgr=package_mgr) specs_toadd = [] specs_todelete = [] @@ -267,8 +298,7 @@ def main(): if raw: # Add raw patterns as specs to add. for p in patterns: - if ((p if state == "present" else "!" + p) - not in locklist_pre): + if (p if state == "present" else "!" + p) not in locklist_pre: specs_toadd.append(p) else: # Get available packages that match the patterns. @@ -292,8 +322,7 @@ def main(): for evr in packages_map_name_evrs[name]: locklist_entry = "%s-%s.*" % (name, evr) - if (locklist_entry if state == "present" - else "!%s" % locklist_entry) not in locklist_pre: + if (locklist_entry if state == "present" else "!%s" % locklist_entry) not in locklist_pre: specs_toadd.append(locklist_entry) if specs_toadd and not module.check_mode: @@ -336,7 +365,7 @@ def main(): "specs_todelete": specs_todelete } if not module.check_mode: - response["locklist_post"] = do_versionlock(module, "list").split() + response["locklist_post"] = get_package_list(module, package_mgr=package_mgr) else: if state == "clean": response["locklist_post"] = [] diff --git a/plugins/modules/dnsimple.py b/plugins/modules/dnsimple.py index e96c22613f..1e9fc8f317 100644 --- a/plugins/modules/dnsimple.py +++ b/plugins/modules/dnsimple.py @@ -1,41 +1,45 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright Ansible Project # # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt 
or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: dnsimple short_description: Interface with dnsimple.com (a DNS hosting service) description: - - "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)." + - 'Manages domains and records using the DNSimple API, see the docs: U(http://developer.dnsimple.com/).' +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: account_email: description: - - Account email. If omitted, the environment variables C(DNSIMPLE_EMAIL) and C(DNSIMPLE_API_TOKEN) will be looked for. - - "If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)." - - "C(.dnsimple) config files are only supported in dnsimple-python<2.0.0" + - Account email. If omitted, the environment variables E(DNSIMPLE_EMAIL) and E(DNSIMPLE_API_TOKEN) are looked for. + - 'If those variables are not found, a C(.dnsimple) file is looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started).' + - C(.dnsimple) config files are only supported in dnsimple-python<2.0.0. type: str account_api_token: description: - - Account API token. See I(account_email) for more information. + - Account API token. See O(account_email) for more information. type: str domain: description: - - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple. - - If omitted, a list of domains will be returned. - - If domain is present but the domain doesn't exist, it will be created. + - Domain to work with. Can be the domain name (for example V(mydomain.com)) or the numeric ID of the domain in DNSimple. 
+ - If omitted, a list of domains is returned. + - If domain is present but the domain does not exist, it is created. type: str record: description: - - Record to add, if blank a record for the domain will be created, supports the wildcard (*). + - Record to add, if blank a record for the domain is created, supports the wildcard (*). type: str record_ids: description: @@ -45,7 +49,23 @@ options: type: description: - The type of DNS record to create. - choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL', 'CAA' ] + choices: + - A + - ALIAS + - CNAME + - MX + - SPF + - URL + - TXT + - NS + - SRV + - NAPTR + - PTR + - AAAA + - SSHFP + - HINFO + - POOL + - CAA type: str ttl: description: @@ -63,14 +83,14 @@ options: type: int state: description: - - whether the record should exist or not. - choices: [ 'present', 'absent' ] + - Whether the record should exist or not. + choices: ['present', 'absent'] default: present type: str solo: description: - Whether the record should be the only one for that record type and record name. - - Only use with C(state) is set to C(present) on a record. + - Only use with O(state) is set to V(present) on a record. 
type: 'bool' default: false sandbox: @@ -84,9 +104,9 @@ options: requirements: - "dnsimple >= 2.0.0" author: "Alex Coomans (@drcapulet)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Authenticate using email and API token and fetch all domains community.general.dnsimple: account_email: test@example.com @@ -142,9 +162,9 @@ EXAMPLES = ''' value: example.com state: absent delegate_to: localhost -''' +""" -RETURN = r"""# """ +RETURN = r"""#""" import traceback import re @@ -168,10 +188,10 @@ class DNSimpleV2(): def dnsimple_client(self): """creates a dnsimple client object""" if self.account_email and self.account_api_token: - client = Client(sandbox=self.sandbox, email=self.account_email, access_token=self.account_api_token) + client = Client(sandbox=self.sandbox, email=self.account_email, access_token=self.account_api_token, user_agent="ansible/community.general") else: msg = "Option account_email or account_api_token not provided. " \ - "Dnsimple authentiction with a .dnsimple config file is not " \ + "Dnsimple authentication with a .dnsimple config file is not " \ "supported with dnsimple-python>=2.0.0" raise DNSimpleException(msg) client.identity.whoami() @@ -218,24 +238,24 @@ class DNSimpleV2(): self.client.domains.delete_domain(self.account.id, domain) def get_records(self, zone, dnsimple_filter=None): - """return dns ressource records which match a specified filter""" + """return dns resource records which match a specified filter""" records_list = self._get_paginated_result(self.client.zones.list_records, account_id=self.account.id, zone=zone, filter=dnsimple_filter) return [d.__dict__ for d in records_list] def delete_record(self, domain, rid): - """delete a single dns ressource record""" + """delete a single dns resource record""" self.client.zones.delete_record(self.account.id, domain, rid) def update_record(self, domain, rid, ttl=None, priority=None): - """update a single dns ressource record""" + """update a single dns resource record""" zr = 
ZoneRecordUpdateInput(ttl=ttl, priority=priority) result = self.client.zones.update_record(self.account.id, str(domain), str(rid), zr).data.__dict__ return result def create_record(self, domain, name, record_type, content, ttl=None, priority=None): - """create a single dns ressource record""" + """create a single dns resource record""" zr = ZoneRecordInput(name=name, type=record_type, content=content, ttl=ttl, priority=priority) return self.client.zones.create_record(self.account.id, str(domain), zr).data.__dict__ diff --git a/plugins/modules/dnsimple_info.py b/plugins/modules/dnsimple_info.py index 52fd53303f..64cc4527a6 100644 --- a/plugins/modules/dnsimple_info.py +++ b/plugins/modules/dnsimple_info.py @@ -1,16 +1,13 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright Edward Hilgendorf, # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: dnsimple_info short_description: Pull basic info from DNSimple API @@ -20,45 +17,45 @@ version_added: "4.2.0" description: Retrieve existing records and domains from DNSimple API. extends_documentation_fragment: - - community.general.attributes - - community.general.attributes.info_module + - community.general.attributes + - community.general.attributes.info_module options: - name: - description: - - The domain name to retrieve info from. - - Will return all associated records for this domain if specified. - - If not specified, will return all domains associated with the account ID. - type: str + name: + description: + - The domain name to retrieve info from. + - Returns all associated records for this domain if specified. + - If not specified, returns all domains associated with the account ID. 
+ type: str - account_id: - description: The account ID to query. - required: true - type: str + account_id: + description: The account ID to query. + required: true + type: str - api_key: - description: The API key to use. - required: true - type: str + api_key: + description: The API key to use. + required: true + type: str - record: - description: - - The record to find. - - If specified, only this record will be returned instead of all records. - required: false - type: str + record: + description: + - The record to find. + - If specified, only this record is returned instead of all records. + required: false + type: str - sandbox: - description: Whether or not to use sandbox environment. - required: false - default: false - type: bool + sandbox: + description: Whether or not to use sandbox environment. + required: false + default: false + type: bool author: - - Edward Hilgendorf (@edhilgendorf) -''' + - Edward Hilgendorf (@edhilgendorf) +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Get all domains from an account community.general.dnsimple_info: account_id: "1234" @@ -76,15 +73,15 @@ EXAMPLES = r''' record: "subdomain" account_id: "1234" api_key: "1234" -''' +""" -RETURN = r''' +RETURN = r""" dnsimple_domain_info: - description: Returns a list of dictionaries of all domains associated with the supplied account ID. - type: list - elements: dict - returned: success when I(name) is not specified - sample: + description: Returns a list of dictionaries of all domains associated with the supplied account ID. + type: list + elements: dict + returned: success when O(name) is not specified + sample: - account_id: 1234 created_at: '2021-10-16T21:25:42Z' id: 123456 @@ -93,41 +90,41 @@ dnsimple_domain_info: reverse: false secondary: false updated_at: '2021-11-10T20:22:50Z' - contains: - account_id: - description: The account ID. - type: int - created_at: - description: When the domain entry was created. - type: str - id: - description: ID of the entry. 
- type: int - last_transferred_at: - description: Date the domain was transferred, or empty if not. - type: str - name: - description: Name of the record. - type: str - reverse: - description: Whether or not it is a reverse zone record. - type: bool - updated_at: - description: When the domain entry was updated. - type: str + contains: + account_id: + description: The account ID. + type: int + created_at: + description: When the domain entry was created. + type: str + id: + description: ID of the entry. + type: int + last_transferred_at: + description: Date the domain was transferred, or empty if not. + type: str + name: + description: Name of the record. + type: str + reverse: + description: Whether or not it is a reverse zone record. + type: bool + updated_at: + description: When the domain entry was updated. + type: str dnsimple_records_info: - description: Returns a list of dictionaries with all records for the domain supplied. - type: list - elements: dict - returned: success when I(name) is specified, but I(record) is not - sample: + description: Returns a list of dictionaries with all records for the domain supplied. + type: list + elements: dict + returned: success when O(name) is specified, but O(record) is not + sample: - content: ns1.dnsimple.com admin.dnsimple.com created_at: '2021-10-16T19:07:34Z' id: 12345 name: 'catheadbiscuit' - parent_id: null - priority: null + parent_id: + priority: regions: - global system_record: true @@ -135,55 +132,55 @@ dnsimple_records_info: type: SOA updated_at: '2021-11-15T23:55:51Z' zone_id: example.com - contains: - content: - description: Content of the returned record. - type: str - created_at: - description: When the domain entry was created. - type: str - id: - description: ID of the entry. - type: int - name: - description: Name of the record. - type: str - parent_id: - description: Parent record or null. - type: int - priority: - description: Priority setting of the record. 
- type: str - regions: - description: List of regions where the record is available. - type: list - system_record: - description: Whether or not it is a system record. - type: bool - ttl: - description: Record TTL. - type: int - type: - description: Record type. - type: str - updated_at: - description: When the domain entry was updated. - type: str - zone_id: - description: ID of the zone that the record is associated with. - type: str + contains: + content: + description: Content of the returned record. + type: str + created_at: + description: When the domain entry was created. + type: str + id: + description: ID of the entry. + type: int + name: + description: Name of the record. + type: str + parent_id: + description: Parent record or null. + type: int + priority: + description: Priority setting of the record. + type: str + regions: + description: List of regions where the record is available. + type: list + system_record: + description: Whether or not it is a system record. + type: bool + ttl: + description: Record TTL. + type: int + type: + description: Record type. + type: str + updated_at: + description: When the domain entry was updated. + type: str + zone_id: + description: ID of the zone that the record is associated with. + type: str dnsimple_record_info: - description: Returns a list of dictionaries that match the record supplied. - returned: success when I(name) and I(record) are specified - type: list - elements: dict - sample: + description: Returns a list of dictionaries that match the record supplied. 
+ returned: success when O(name) and O(record) are specified + type: list + elements: dict + sample: - content: 1.2.3.4 created_at: '2021-11-15T23:55:51Z' id: 123456 name: catheadbiscuit - parent_id: null - priority: null + parent_id: + priority: regions: - global system_record: false @@ -191,44 +188,44 @@ dnsimple_record_info: type: A updated_at: '2021-11-15T23:55:51Z' zone_id: example.com - contains: - content: - description: Content of the returned record. - type: str - created_at: - description: When the domain entry was created. - type: str - id: - description: ID of the entry. - type: int - name: - description: Name of the record. - type: str - parent_id: - description: Parent record or null. - type: int - priority: - description: Priority setting of the record. - type: str - regions: - description: List of regions where the record is available. - type: list - system_record: - description: Whether or not it is a system record. - type: bool - ttl: - description: Record TTL. - type: int - type: - description: Record type. - type: str - updated_at: - description: When the domain entry was updated. - type: str - zone_id: - description: ID of the zone that the record is associated with. - type: str -''' + contains: + content: + description: Content of the returned record. + type: str + created_at: + description: When the domain entry was created. + type: str + id: + description: ID of the entry. + type: int + name: + description: Name of the record. + type: str + parent_id: + description: Parent record or null. + type: int + priority: + description: Priority setting of the record. + type: str + regions: + description: List of regions where the record is available. + type: list + system_record: + description: Whether or not it is a system record. + type: bool + ttl: + description: Record TTL. + type: int + type: + description: Record type. + type: str + updated_at: + description: When the domain entry was updated. 
+ type: str + zone_id: + description: ID of the zone that the record is associated with. + type: str +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils import deps @@ -239,9 +236,9 @@ with deps.declare("requests"): def build_url(account, key, is_sandbox): headers = {'Accept': 'application/json', - 'Authorization': 'Bearer ' + key} - url = 'https://api{sandbox}.dnsimple.com/'.format( - sandbox=".sandbox" if is_sandbox else "") + 'v2/' + account + 'Authorization': 'Bearer {0}'.format(key)} + sandbox = '.sandbox' if is_sandbox else '' + url = 'https://api{sandbox}.dnsimple.com/v2/{account}'.format(sandbox=sandbox, account=account) req = Request(url=url, headers=headers) prepped_request = req.prepare() return prepped_request @@ -250,19 +247,21 @@ def build_url(account, key, is_sandbox): def iterate_data(module, request_object): base_url = request_object.url response = Session().send(request_object) - if 'pagination' in response.json(): - data = response.json()["data"] - pages = response.json()["pagination"]["total_pages"] - if int(pages) > 1: - for page in range(1, pages): - page = page + 1 - request_object.url = base_url + '&page=' + str(page) - new_results = Session().send(request_object) - data = data + new_results.json()["data"] - return data - else: + if 'pagination' not in response.json(): module.fail_json('API Call failed, check ID, key and sandbox values') + data = response.json()["data"] + total_pages = response.json()["pagination"]["total_pages"] + page = 1 + + while page < total_pages: + page = page + 1 + request_object.url = '{url}&page={page}'.format(url=base_url, page=page) + new_results = Session().send(request_object) + data = data + new_results.json()['data'] + + return data + def record_info(dnsimple_mod, req_obj): req_obj.url, req_obj.method = req_obj.url + '/zones/' + dnsimple_mod.params["name"] + '/records?name=' + dnsimple_mod.params["record"], 'GET' diff --git 
a/plugins/modules/dnsmadeeasy.py b/plugins/modules/dnsmadeeasy.py index b775f24ab0..e74e8a547b 100644 --- a/plugins/modules/dnsmadeeasy.py +++ b/plugins/modules/dnsmadeeasy.py @@ -1,22 +1,25 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: dnsmadeeasy short_description: Interface with dnsmadeeasy.com (a DNS hosting service) description: - - > - Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or - monitor/account support yet. See: U(https://www.dnsmadeeasy.com/integration/restapi/) + - 'Manages DNS records using the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation + of domains or monitor/account support yet. See: U(https://www.dnsmadeeasy.com/integration/restapi/).' +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: account_key: description: @@ -32,8 +35,8 @@ options: domain: description: - - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster - resolution + - Domain to work with. Can be the domain name (for example V(mydomain.com)) or the numeric ID of the domain in DNS Made + Easy (for example V(839989)) for faster resolution. required: true type: str @@ -45,49 +48,47 @@ options: record_name: description: - - Record name to get/create/delete/update. If record_name is not specified; all records for the domain will be returned in "result" regardless - of the state argument. 
+ - Record name to get/create/delete/update. If O(record_name) is not specified; all records for the domain are returned + in "result" regardless of the state argument. type: str record_type: description: - Record type. - choices: [ 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT' ] + choices: ['A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT'] type: str record_value: description: - - > - Record value. HTTPRED: , MX: , NS: , PTR: , - SRV: , TXT: " - - > - If record_value is not specified; no changes will be made and the record will be returned in 'result' - (in other words, this module can be used to fetch a record's current id, type, and ttl) + - 'Record value. HTTPRED: , MX: , NS: , PTR: , SRV: + , TXT: ".' + - If O(record_value) is not specified; no changes are made and the record is returned in RV(ignore:result) (in other + words, this module can be used to fetch a record's current ID, type, and TTL). type: str record_ttl: description: - - record's "Time to live". Number of seconds the record remains cached in DNS servers. + - Record's "Time-To-Live". Number of seconds the record remains cached in DNS servers. default: 1800 type: int state: description: - - whether the record should exist or not + - Whether the record should exist or not. required: true - choices: [ 'present', 'absent' ] + choices: ['present', 'absent'] type: str validate_certs: description: - - If C(false), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. type: bool default: true monitor: description: - - If C(true), add or change the monitor. This is applicable only for A records. + - If V(true), add or change the monitor. This is applicable only for A records. 
type: bool default: false @@ -125,8 +126,8 @@ options: contactList: description: - - Name or id of the contact list that the monitor will notify. - - The default C('') means the Account Owner. + - Name or ID of the contact list that the monitor notifies. + - The default V('') means the Account Owner. type: str httpFqdn: @@ -146,7 +147,7 @@ options: failover: description: - - If C(true), add or change the failover. This is applicable only for A records. + - If V(true), add or change the failover. This is applicable only for A records. type: bool default: false @@ -185,20 +186,19 @@ options: type: str notes: - - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few - seconds of actual time by using NTP. - - This module returns record(s) and monitor(s) in the "result" element when 'state' is set to 'present'. - These values can be be registered and used in your playbooks. - - Only A records can have a monitor or failover. - - To add failover, the 'failover', 'autoFailover', 'port', 'protocol', 'ip1', and 'ip2' options are required. - - To add monitor, the 'monitor', 'port', 'protocol', 'maxEmails', 'systemDescription', and 'ip1' options are required. - - The monitor and the failover will share 'port', 'protocol', and 'ip1' options. - -requirements: [ hashlib, hmac ] + - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure + you are within a few seconds of actual time by using NTP. + - This module returns record(s) and monitor(s) in the RV(ignore:result) element when O(state=present). These values can + be be registered and used in your playbooks. + - Only A records can have a O(monitor) or O(failover). + - To add failover, the O(failover), O(autoFailover), O(port), O(protocol), O(ip1), and O(ip2) options are required. 
+ - To add monitor, the O(monitor), O(port), O(protocol), O(maxEmails), O(systemDescription), and O(ip1) options are required. + - The options O(monitor) and O(failover) share O(port), O(protocol), and O(ip1) options. +requirements: [hashlib, hmac] author: "Brice Burgess (@briceburg)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Fetch my.com domain records community.general.dnsmadeeasy: account_key: key @@ -284,8 +284,8 @@ EXAMPLES = ''' record_value: 127.0.0.1 monitor: true ip1: 127.0.0.2 - protocol: HTTP # default - port: 80 # default + protocol: HTTP # default + port: 80 # default maxEmails: 1 systemDescription: Monitor Test A record contactList: my contact list @@ -301,11 +301,11 @@ EXAMPLES = ''' record_value: 127.0.0.1 monitor: true ip1: 127.0.0.2 - protocol: HTTP # default - port: 80 # default + protocol: HTTP # default + port: 80 # default maxEmails: 1 systemDescription: Monitor Test A record - contactList: 1174 # contact list id + contactList: 1174 # contact list id httpFqdn: http://my.com httpFile: example httpQueryString: some string @@ -350,7 +350,7 @@ EXAMPLES = ''' record_type: A record_value: 127.0.0.1 monitor: false -''' +""" # ============================================ # DNSMadeEasy module specific support methods. 
@@ -361,11 +361,10 @@ import hashlib import hmac import locale from time import strftime, gmtime +from urllib.parse import urlencode from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.six import string_types class DME2(object): @@ -415,7 +414,7 @@ class DME2(object): def query(self, resource, method, data=None): url = self.baseurl + resource - if data and not isinstance(data, string_types): + if data and not isinstance(data, str): data = urlencode(data) response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers()) @@ -484,7 +483,7 @@ class DME2(object): return self.query(self.record_url, 'GET')['data'] def _instMap(self, type): - # @TODO cache this call so it's executed only once per ansible execution + # @TODO cache this call so it is executed only once per ansible execution map = {} results = {} @@ -502,15 +501,15 @@ class DME2(object): return json.dumps(data, separators=(',', ':')) def createRecord(self, data): - # @TODO update the cache w/ resultant record + id when impleneted + # @TODO update the cache w/ resultant record + id when implemented return self.query(self.record_url, 'POST', data) def updateRecord(self, record_id, data): - # @TODO update the cache w/ resultant record + id when impleneted + # @TODO update the cache w/ resultant record + id when implemented return self.query(self.record_url + '/' + str(record_id), 'PUT', data) def deleteRecord(self, record_id): - # @TODO remove record from the cache when impleneted + # @TODO remove record from the cache when implemented return self.query(self.record_url + '/' + str(record_id), 'DELETE') def getMonitor(self, record_id): @@ -551,28 +550,28 @@ def main(): domain=dict(required=True), sandbox=dict(default=False, type='bool'), state=dict(required=True, choices=['present', 'absent']), - record_name=dict(required=False), - 
record_type=dict(required=False, choices=[ + record_name=dict(), + record_type=dict(choices=[ 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']), - record_value=dict(required=False), - record_ttl=dict(required=False, default=1800, type='int'), + record_value=dict(), + record_ttl=dict(default=1800, type='int'), monitor=dict(default=False, type='bool'), systemDescription=dict(default=''), maxEmails=dict(default=1, type='int'), protocol=dict(default='HTTP', choices=['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']), port=dict(default=80, type='int'), sensitivity=dict(default='Medium', choices=['Low', 'Medium', 'High']), - contactList=dict(default=None), - httpFqdn=dict(required=False), - httpFile=dict(required=False), - httpQueryString=dict(required=False), + contactList=dict(), + httpFqdn=dict(), + httpFile=dict(), + httpQueryString=dict(), failover=dict(default=False, type='bool'), autoFailover=dict(default=False, type='bool'), - ip1=dict(required=False), - ip2=dict(required=False), - ip3=dict(required=False), - ip4=dict(required=False), - ip5=dict(required=False), + ip1=dict(), + ip2=dict(), + ip3=dict(), + ip4=dict(), + ip5=dict(), validate_certs=dict(default=True, type='bool'), ), required_together=[ diff --git a/plugins/modules/dpkg_divert.py b/plugins/modules/dpkg_divert.py index 52e6c33d0d..7f37a47de4 100644 --- a/plugins/modules/dpkg_divert.py +++ b/plugins/modules/dpkg_divert.py @@ -1,91 +1,82 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017-2020, Yann Amar # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: dpkg_divert short_description: Override a debian package's version of a file version_added: '0.2.0' author: - quidame (@quidame) 
description: - - A diversion is for C(dpkg) the knowledge that only a given package - (or the local administrator) is allowed to install a file at a given - location. Other packages shipping their own version of this file will - be forced to I(divert) it, i.e. to install it at another location. It - allows one to keep changes in a file provided by a debian package by - preventing its overwrite at package upgrade. - - This module manages diversions of debian packages files using the - C(dpkg-divert) commandline tool. It can either create or remove a - diversion for a given file, but also update an existing diversion - to modify its I(holder) and/or its I(divert) location. + - A diversion is for C(dpkg) the knowledge that only a given package (or the local administrator) is allowed to install + a file at a given location. Other packages shipping their own version of this file are forced to O(divert) it, that is + to install it at another location. It allows one to keep changes in a file provided by a debian package by preventing + it being overwritten on package upgrade. + - This module manages diversions of debian packages files using the C(dpkg-divert) commandline tool. It can either create + or remove a diversion for a given file, but also update an existing diversion to modify its O(holder) and/or its O(divert) + location. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full options: path: description: - - The original and absolute path of the file to be diverted or - undiverted. This path is unique, i.e. it is not possible to get - two diversions for the same I(path). + - The original and absolute path of the file to be diverted or undiverted. This path is unique, in other words it is + not possible to get two diversions for the same O(path). 
required: true type: path state: description: - - When I(state=absent), remove the diversion of the specified - I(path); when I(state=present), create the diversion if it does - not exist, or update its package I(holder) or I(divert) location, - if it already exists. + - When O(state=absent), remove the diversion of the specified O(path); when O(state=present), create the diversion if + it does not exist, or update its package O(holder) or O(divert) location, if it already exists. type: str default: present choices: [absent, present] holder: description: - - The name of the package whose copy of file is not diverted, also - known as the diversion holder or the package the diversion belongs - to. - - The actual package does not have to be installed or even to exist - for its name to be valid. If not specified, the diversion is hold - by 'LOCAL', that is reserved by/for dpkg for local diversions. - - This parameter is ignored when I(state=absent). + - The name of the package whose copy of file is not diverted, also known as the diversion holder or the package the + diversion belongs to. + - The actual package does not have to be installed or even to exist for its name to be valid. If not specified, the + diversion is hold by 'LOCAL', that is reserved by/for dpkg for local diversions. + - This parameter is ignored when O(state=absent). type: str divert: description: - - The location where the versions of file will be diverted. + - The location where the versions of file are diverted. - Default is to add suffix C(.distrib) to the file path. - - This parameter is ignored when I(state=absent). + - This parameter is ignored when O(state=absent). type: path rename: description: - - Actually move the file aside (when I(state=present)) or back (when - I(state=absent)), but only when changing the state of the diversion. - This parameter has no effect when attempting to add a diversion that - already exists or when removing an unexisting one. 
- - Unless I(force=true), renaming fails if the destination file already - exists (this lock being a dpkg-divert feature, and bypassing it being - a module feature). + - Actually move the file aside (when O(state=present)) or back (when O(state=absent)), but only when changing the state + of the diversion. This parameter has no effect when attempting to add a diversion that already exists or when removing + an unexisting one. + - Unless O(force=true), renaming fails if the destination file already exists (this lock being a dpkg-divert feature, + and bypassing it being a module feature). type: bool default: false force: description: - - When I(rename=true) and I(force=true), renaming is performed even if - the target of the renaming exists, i.e. the existing contents of the - file at this location will be lost. - - This parameter is ignored when I(rename=false). + - When O(rename=true) and O(force=true), renaming is performed even if the target of the renaming exists, in other words + the existing contents of the file at this location are lost. + - This parameter is ignored when O(rename=false). type: bool default: false -notes: - - This module supports I(check_mode) and I(diff). requirements: - dpkg-divert >= 1.15.0 (Debian family) -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Divert /usr/bin/busybox to /usr/bin/busybox.distrib and keep file in place community.general.dpkg_divert: path: /usr/bin/busybox @@ -107,9 +98,9 @@ EXAMPLES = r''' state: absent rename: true force: true -''' +""" -RETURN = r''' +RETURN = r""" commands: description: The dpkg-divert commands ran internally by the module. 
type: list @@ -146,7 +137,7 @@ diversion: "path": "/etc/foobarrc", "state": "present" } -''' +""" import re @@ -173,11 +164,11 @@ def main(): module = AnsibleModule( argument_spec=dict( path=dict(required=True, type='path'), - state=dict(required=False, type='str', default='present', choices=['absent', 'present']), - holder=dict(required=False, type='str'), - divert=dict(required=False, type='path'), - rename=dict(required=False, type='bool', default=False), - force=dict(required=False, type='bool', default=False), + state=dict(type='str', default='present', choices=['absent', 'present']), + holder=dict(type='str'), + divert=dict(type='path'), + rename=dict(type='bool', default=False), + force=dict(type='bool', default=False), ), supports_check_mode=True, ) diff --git a/plugins/modules/easy_install.py b/plugins/modules/easy_install.py index 0a3158c926..d533da899f 100644 --- a/plugins/modules/easy_install.py +++ b/plugins/modules/easy_install.py @@ -1,73 +1,68 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2012, Matt Wright # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: easy_install short_description: Installs Python libraries description: - - Installs Python libraries, optionally in a I(virtualenv) + - Installs Python libraries, optionally in a C(virtualenv). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: type: str description: - - A Python library name + - A Python library name. required: true virtualenv: type: str description: - - an optional I(virtualenv) directory path to install into. 
If the - I(virtualenv) does not exist, it is created automatically + - An optional O(virtualenv) directory path to install into. If the O(virtualenv) does not exist, it is created automatically. virtualenv_site_packages: description: - - Whether the virtual environment will inherit packages from the - global site-packages directory. Note that if this setting is - changed on an already existing virtual environment it will not - have any effect, the environment must be deleted and newly - created. + - Whether the virtual environment inherits packages from the global site-packages directory. Note that this setting + has no effect on an already existing virtual environment, so if you want to change it, the environment must be deleted + and newly created. type: bool default: false virtualenv_command: type: str description: - - The command to create the virtual environment with. For example - C(pyvenv), C(virtualenv), C(virtualenv2). + - The command to create the virtual environment with. For example V(pyvenv), V(virtualenv), V(virtualenv2). default: virtualenv executable: type: str description: - - The explicit executable or a pathname to the executable to be used to - run easy_install for a specific version of Python installed in the - system. For example C(easy_install-3.3), if there are both Python 2.7 - and 3.3 installations in the system and you want to run easy_install - for the Python 3.3 installation. + - The explicit executable or a pathname to the executable to be used to run easy_install for a specific version of Python + installed in the system. For example V(easy_install-3.3), if there are both Python 2.7 and 3.3 installations in the + system and you want to run easy_install for the Python 3.3 installation. default: easy_install state: type: str description: - - The desired state of the library. C(latest) ensures that the latest version is installed. + - The desired state of the library. V(latest) ensures that the latest version is installed. 
choices: [present, latest] default: present notes: - - Please note that the C(easy_install) module can only install Python - libraries. Thus this module is not able to remove libraries. It is - generally recommended to use the M(ansible.builtin.pip) module which you can first install - using M(community.general.easy_install). - - Also note that I(virtualenv) must be installed on the remote host if the - C(virtualenv) parameter is specified. -requirements: [ "virtualenv" ] + - Please note that the C(easy_install) module can only install Python libraries. Thus this module is not able to remove + libraries. It is generally recommended to use the M(ansible.builtin.pip) module which you can first install using M(community.general.easy_install). + - Also note that C(virtualenv) must be installed on the remote host if the O(virtualenv) parameter is specified. +requirements: ["virtualenv"] author: "Matt Wright (@mattupstate)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install or update pip community.general.easy_install: name: pip @@ -77,7 +72,13 @@ EXAMPLES = ''' community.general.easy_install: name: bottle virtualenv: /webapps/myapp/venv -''' + +- name: Install a python package using pyvenv as the virtualenv tool + community.general.easy_install: + name: package_name + virtualenv: /opt/myenv + virtualenv_command: pyvenv +""" import os import os.path @@ -86,7 +87,7 @@ from ansible.module_utils.basic import AnsibleModule def install_package(module, name, easy_install, executable_arguments): - cmd = '%s %s %s' % (easy_install, ' '.join(executable_arguments), name) + cmd = [easy_install] + executable_arguments + [name] rc, out, err = module.run_command(cmd) return rc, out, err @@ -130,14 +131,13 @@ def _get_easy_install(module, env=None, executable=None): def main(): arg_spec = dict( name=dict(required=True), - state=dict(required=False, - default='present', + state=dict(default='present', choices=['present', 'latest'], type='str'), - virtualenv=dict(default=None, 
required=False), + virtualenv=dict(), virtualenv_site_packages=dict(default=False, type='bool'), - virtualenv_command=dict(default='virtualenv', required=False), - executable=dict(default='easy_install', required=False), + virtualenv_command=dict(default='virtualenv'), + executable=dict(default='easy_install'), ) module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) diff --git a/plugins/modules/ejabberd_user.py b/plugins/modules/ejabberd_user.py index f7189fa08e..d60a5d4f4a 100644 --- a/plugins/modules/ejabberd_user.py +++ b/plugins/modules/ejabberd_user.py @@ -1,58 +1,56 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (C) 2013, Peter Sprygada # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ejabberd_user author: "Peter Sprygada (@privateip)" short_description: Manages users for ejabberd servers requirements: - - ejabberd with mod_admin_extra + - ejabberd with mod_admin_extra description: - - This module provides user management for ejabberd servers + - This module provides user management for ejabberd servers. 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - username: - type: str - description: - - the name of the user to manage - required: true - host: - type: str - description: - - the ejabberd host associated with this username - required: true - password: - type: str - description: - - the password to assign to the username - required: false - logging: - description: - - enables or disables the local syslog facility for this module - required: false - default: false - type: bool - state: - type: str - description: - - describe the desired state of the user to be managed - required: false - default: 'present' - choices: [ 'present', 'absent' ] + username: + type: str + description: + - The name of the user to manage. + required: true + host: + type: str + description: + - The ejabberd host associated with this username. + required: true + password: + type: str + description: + - The password to assign to the username. + required: false + state: + type: str + description: + - Describe the desired state of the user to be managed. + required: false + default: 'present' + choices: ['present', 'absent'] notes: - - Password parameter is required for state == present only - - Passwords must be stored in clear text for this release - - The ejabberd configuration file must include mod_admin_extra as a module. -''' -EXAMPLES = ''' + - Password parameter is required for O(state=present) only. + - Passwords must be stored in clear text for this release. + - The ejabberd configuration file must include mod_admin_extra as a module. +""" +EXAMPLES = r""" # Example playbook entries using the ejabberd_user module to manage users state. 
- name: Create a user if it does not exist @@ -66,11 +64,10 @@ EXAMPLES = ''' username: test host: server state: absent -''' - -import syslog +""" from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt class EjabberdUser(object): @@ -78,16 +75,26 @@ class EjabberdUser(object): object manages user creation and deletion using ejabberdctl. The following commands are currently supported: * ejabberdctl register - * ejabberdctl deregister + * ejabberdctl unregister """ def __init__(self, module): self.module = module - self.logging = module.params.get('logging') self.state = module.params.get('state') self.host = module.params.get('host') self.user = module.params.get('username') self.pwd = module.params.get('password') + self.runner = CmdRunner( + module, + command="ejabberdctl", + arg_formats=dict( + cmd=cmd_runner_fmt.as_list(), + host=cmd_runner_fmt.as_list(), + user=cmd_runner_fmt.as_list(), + pwd=cmd_runner_fmt.as_list(), + ), + check_rc=False, + ) @property def changed(self): @@ -95,7 +102,7 @@ class EjabberdUser(object): changed. It will return True if the user does not match the supplied credentials and False if it does not """ - return self.run_command('check_password', [self.user, self.host, self.pwd]) + return self.run_command('check_password', 'user host pwd', (lambda rc, out, err: bool(rc))) @property def exists(self): @@ -103,37 +110,42 @@ class EjabberdUser(object): host specified. 
If the user exists True is returned, otherwise False is returned """ - return self.run_command('check_account', [self.user, self.host]) + return self.run_command('check_account', 'user host', (lambda rc, out, err: not bool(rc))) def log(self, entry): - """ This method will log information to the local syslog facility """ - if self.logging: - syslog.openlog('ansible-%s' % self.module._name) - syslog.syslog(syslog.LOG_NOTICE, entry) + """ This method does nothing """ + pass - def run_command(self, cmd, options): + def run_command(self, cmd, options, process=None): """ This method will run the any command specified and return the returns using the Ansible common module """ - cmd = [self.module.get_bin_path('ejabberdctl'), cmd] + options - self.log('command: %s' % " ".join(cmd)) - return self.module.run_command(cmd) + def _proc(*a): + return a + + if process is None: + process = _proc + + with self.runner("cmd " + options, output_process=process) as ctx: + res = ctx.run(cmd=cmd, host=self.host, user=self.user, pwd=self.pwd) + self.log('command: %s' % " ".join(ctx.run_info['cmd'])) + return res def update(self): """ The update method will update the credentials for the user provided """ - return self.run_command('change_password', [self.user, self.host, self.pwd]) + return self.run_command('change_password', 'user host pwd') def create(self): """ The create method will create a new user on the host with the password provided """ - return self.run_command('register', [self.user, self.host, self.pwd]) + return self.run_command('register', 'user host pwd') def delete(self): """ The delete method will delete the user from the host """ - return self.run_command('unregister', [self.user, self.host]) + return self.run_command('unregister', 'user host') def main(): @@ -143,7 +155,6 @@ def main(): username=dict(required=True, type='str'), password=dict(type='str', no_log=True), state=dict(default='present', choices=['present', 'absent']), - logging=dict(default=False, 
type='bool') # deprecate in favour of c.g.syslogger? ), required_if=[ ('state', 'present', ['password']), diff --git a/plugins/modules/elasticsearch_plugin.py b/plugins/modules/elasticsearch_plugin.py index a68ff086c3..7d49ebded1 100644 --- a/plugins/modules/elasticsearch_plugin.py +++ b/plugins/modules/elasticsearch_plugin.py @@ -1,90 +1,91 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2015, Mathew Davies # Copyright (c) 2017, Sam Doran # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: elasticsearch_plugin short_description: Manage Elasticsearch plugins description: - - Manages Elasticsearch plugins. + - Manages Elasticsearch plugins. author: - - Mathew Davies (@ThePixelDeveloper) - - Sam Doran (@samdoran) + - Mathew Davies (@ThePixelDeveloper) + - Sam Doran (@samdoran) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - Name of the plugin to install. - required: true - type: str - state: - description: - - Desired state of a plugin. - choices: ["present", "absent"] - default: present - type: str - src: - description: - - Optionally set the source location to retrieve the plugin from. This can be a file:// - URL to install from a local file, or a remote URL. If this is not set, the plugin - location is just based on the name. - - The name parameter must match the descriptor in the plugin ZIP specified. - - Is only used if the state would change, which is solely checked based on the name - parameter. If, for example, the plugin is already installed, changing this has no - effect. - - For ES 1.x use url. 
- required: false - type: str - url: - description: - - Set exact URL to download the plugin from (Only works for ES 1.x). - - For ES 2.x and higher, use src. - required: false - type: str - timeout: - description: - - "Timeout setting: 30s, 1m, 1h..." - - Only valid for Elasticsearch < 5.0. This option is ignored for Elasticsearch > 5.0. - default: 1m - type: str - force: - description: - - "Force batch mode when installing plugins. This is only necessary if a plugin requires additional permissions and console detection fails." - default: false - type: bool - plugin_bin: - description: - - Location of the plugin binary. If this file is not found, the default plugin binaries will be used. - - The default changed in Ansible 2.4 to None. - type: path - plugin_dir: - description: - - Your configured plugin directory specified in Elasticsearch - default: /usr/share/elasticsearch/plugins/ - type: path - proxy_host: - description: - - Proxy host to use during plugin installation - type: str - proxy_port: - description: - - Proxy port to use during plugin installation - type: str - version: - description: - - Version of the plugin to be installed. - If plugin exists with previous version, it will NOT be updated - type: str -''' + name: + description: + - Name of the plugin to install. + required: true + type: str + state: + description: + - Desired state of a plugin. + choices: ["present", "absent"] + default: present + type: str + src: + description: + - Optionally set the source location to retrieve the plugin from. This can be a C(file://) URL to install from a local + file, or a remote URL. If this is not set, the plugin location is just based on the name. + - The name parameter must match the descriptor in the plugin ZIP specified. + - Is only used if the state would change, which is solely checked based on the name parameter. If, for example, the + plugin is already installed, changing this has no effect. + - For ES 1.x use O(url). 
+ required: false + type: str + url: + description: + - Set exact URL to download the plugin from (Only works for ES 1.x). + - For ES 2.x and higher, use src. + required: false + type: str + timeout: + description: + - 'Timeout setting: V(30s), V(1m), V(1h)...' + - Only valid for Elasticsearch < 5.0. This option is ignored for Elasticsearch > 5.0. + default: 1m + type: str + force: + description: + - Force batch mode when installing plugins. This is only necessary if a plugin requires additional permissions and console + detection fails. + default: false + type: bool + plugin_bin: + description: + - Location of the plugin binary. If this file is not found, the default plugin binaries are used. + type: path + plugin_dir: + description: + - Your configured plugin directory specified in Elasticsearch. + default: /usr/share/elasticsearch/plugins/ + type: path + proxy_host: + description: + - Proxy host to use during plugin installation. + type: str + proxy_port: + description: + - Proxy port to use during plugin installation. + type: str + version: + description: + - Version of the plugin to be installed. If plugin exists with previous version, it is NOT updated. 
+ type: str +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install Elasticsearch Head plugin in Elasticsearch 2.x community.general.elasticsearch_plugin: name: mobz/elasticsearch-head @@ -110,7 +111,7 @@ EXAMPLES = ''' name: ingest-geoip state: present force: true -''' +""" import os @@ -160,33 +161,38 @@ def parse_error(string): def install_plugin(module, plugin_bin, plugin_name, version, src, url, proxy_host, proxy_port, timeout, force): - cmd_args = [plugin_bin, PACKAGE_STATE_MAP["present"]] + cmd = [plugin_bin, PACKAGE_STATE_MAP["present"]] is_old_command = (os.path.basename(plugin_bin) == 'plugin') # Timeout and version are only valid for plugin, not elasticsearch-plugin if is_old_command: if timeout: - cmd_args.append("--timeout %s" % timeout) + cmd.append("--timeout") + cmd.append(timeout) if version: plugin_name = plugin_name + '/' + version - cmd_args[2] = plugin_name + cmd[2] = plugin_name if proxy_host and proxy_port: - cmd_args.append("-DproxyHost=%s -DproxyPort=%s" % (proxy_host, proxy_port)) + java_opts = ["-Dhttp.proxyHost=%s" % proxy_host, + "-Dhttp.proxyPort=%s" % proxy_port, + "-Dhttps.proxyHost=%s" % proxy_host, + "-Dhttps.proxyPort=%s" % proxy_port] + module.run_command_environ_update = dict(CLI_JAVA_OPTS=" ".join(java_opts), # Elasticsearch 8.x + ES_JAVA_OPTS=" ".join(java_opts)) # Older Elasticsearch versions # Legacy ES 1.x if url: - cmd_args.append("--url %s" % url) + cmd.append("--url") + cmd.append(url) if force: - cmd_args.append("--batch") + cmd.append("--batch") if src: - cmd_args.append(src) + cmd.append(src) else: - cmd_args.append(plugin_name) - - cmd = " ".join(cmd_args) + cmd.append(plugin_name) if module.check_mode: rc, out, err = 0, "check mode", "" @@ -201,9 +207,7 @@ def install_plugin(module, plugin_bin, plugin_name, version, src, url, proxy_hos def remove_plugin(module, plugin_bin, plugin_name): - cmd_args = [plugin_bin, PACKAGE_STATE_MAP["absent"], parse_plugin_repo(plugin_name)] - - cmd = " ".join(cmd_args) + cmd = 
[plugin_bin, PACKAGE_STATE_MAP["absent"], parse_plugin_repo(plugin_name)] if module.check_mode: rc, out, err = 0, "check mode", "" @@ -232,8 +236,8 @@ def get_plugin_bin(module, plugin_bin=None): # Get separate lists of dirs and binary names from the full paths to the # plugin binaries. - plugin_dirs = list(set([os.path.dirname(x) for x in bin_paths])) - plugin_bins = list(set([os.path.basename(x) for x in bin_paths])) + plugin_dirs = list(set(os.path.dirname(x) for x in bin_paths)) + plugin_bins = list(set(os.path.basename(x) for x in bin_paths)) # Check for the binary names in the default system paths as well as the path # specified in the module arguments. @@ -253,15 +257,15 @@ def main(): argument_spec=dict( name=dict(required=True), state=dict(default="present", choices=list(PACKAGE_STATE_MAP.keys())), - src=dict(default=None), - url=dict(default=None), + src=dict(), + url=dict(), timeout=dict(default="1m"), force=dict(type='bool', default=False), plugin_bin=dict(type="path"), plugin_dir=dict(default="/usr/share/elasticsearch/plugins/", type="path"), - proxy_host=dict(default=None), - proxy_port=dict(default=None), - version=dict(default=None) + proxy_host=dict(), + proxy_port=dict(), + version=dict() ), mutually_exclusive=[("src", "url")], supports_check_mode=True diff --git a/plugins/modules/emc_vnx_sg_member.py b/plugins/modules/emc_vnx_sg_member.py index 39c0ab3a99..fce2c59c32 100644 --- a/plugins/modules/emc_vnx_sg_member.py +++ b/plugins/modules/emc_vnx_sg_member.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (c) 2018, Luca 'remix_tj' Lorenzetto # @@ -7,51 +6,52 @@ # SPDX-License-Identifier: GPL-3.0-or-later # -from __future__ import (absolute_import, division, print_function) +from __future__ import annotations -__metaclass__ = type - - -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: emc_vnx_sg_member short_description: Manage storage group member on EMC VNX description: - - "This module manages the members of an 
existing storage group." - + - This module manages the members of an existing storage group. extends_documentation_fragment: -- community.general.emc.emc_vnx + - community.general.emc.emc_vnx + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - Name of the Storage group to manage. - required: true - type: str - lunid: - description: - - Lun id to be added. - required: true - type: int - state: - description: - - Indicates the desired lunid state. - - C(present) ensures specified lunid is present in the Storage Group. - - C(absent) ensures specified lunid is absent from Storage Group. - default: present - choices: [ "present", "absent"] - type: str + name: + description: + - Name of the Storage group to manage. + required: true + type: str + lunid: + description: + - LUN ID to be added. + required: true + type: int + state: + description: + - Indicates the desired lunid state. + - V(present) ensures specified O(lunid) is present in the Storage Group. + - V(absent) ensures specified O(lunid) is absent from Storage Group. + default: present + choices: ["present", "absent"] + type: str author: - - Luca 'remix_tj' Lorenzetto (@remixtj) -''' + - Luca 'remix_tj' Lorenzetto (@remixtj) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Add lun to storage group community.general.emc_vnx_sg_member: name: sg01 @@ -69,14 +69,14 @@ EXAMPLES = ''' sp_password: sysadmin lunid: 100 state: absent -''' +""" -RETURN = ''' +RETURN = r""" hluid: - description: LUNID that hosts attached to the storage group will see. - type: int - returned: success -''' + description: LUNID visible to hosts attached to the storage group. 
+ type: int + returned: success +""" import traceback diff --git a/plugins/modules/etcd3.py b/plugins/modules/etcd3.py index 2a89c71968..397bb1d767 100644 --- a/plugins/modules/etcd3.py +++ b/plugins/modules/etcd3.py @@ -1,85 +1,89 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (c) 2018, Jean-Philippe Evrard # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: etcd3 short_description: Set or delete key value pairs from an etcd3 cluster requirements: - etcd3 description: - - Sets or deletes values in etcd3 cluster using its v3 api. - - Needs python etcd3 lib to work + - Sets or deletes values in etcd3 cluster using its v3 API. + - Needs python etcd3 lib to work. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - key: - type: str - description: - - the key where the information is stored in the cluster - required: true - value: - type: str - description: - - the information stored - required: true - host: - type: str - description: - - the IP address of the cluster - default: 'localhost' - port: - type: int - description: - - the port number used to connect to the cluster - default: 2379 - state: - type: str - description: - - the state of the value for the key. - - can be present or absent - required: true - choices: [ present, absent ] - user: - type: str - description: - - The etcd user to authenticate with. - password: - type: str - description: - - The password to use for authentication. - - Required if I(user) is defined. - ca_cert: - type: path - description: - - The Certificate Authority to use to verify the etcd host. 
- - Required if I(client_cert) and I(client_key) are defined. - client_cert: - type: path - description: - - PEM formatted certificate chain file to be used for SSL client authentication. - - Required if I(client_key) is defined. - client_key: - type: path - description: - - PEM formatted file that contains your private key to be used for SSL client authentication. - - Required if I(client_cert) is defined. - timeout: - type: int - description: - - The socket level timeout in seconds. + key: + type: str + description: + - The key where the information is stored in the cluster. + required: true + value: + type: str + description: + - The information stored. + required: true + host: + type: str + description: + - The IP address of the cluster. + default: 'localhost' + port: + type: int + description: + - The port number used to connect to the cluster. + default: 2379 + state: + type: str + description: + - The state of the value for the key. + - Can be present or absent. + required: true + choices: [present, absent] + user: + type: str + description: + - The etcd user to authenticate with. + password: + type: str + description: + - The password to use for authentication. + - Required if O(user) is defined. + ca_cert: + type: path + description: + - The Certificate Authority to use to verify the etcd host. + - Required if O(client_cert) and O(client_key) are defined. + client_cert: + type: path + description: + - PEM formatted certificate chain file to be used for SSL client authentication. + - Required if O(client_key) is defined. + client_key: + type: path + description: + - PEM formatted file that contains your private key to be used for SSL client authentication. + - Required if O(client_cert) is defined. + timeout: + type: int + description: + - The socket level timeout in seconds. 
author: - - Jean-Philippe Evrard (@evrardjp) - - Victor Fauth (@vfauth) -''' + - Jean-Philippe Evrard (@evrardjp) + - Victor Fauth (@vfauth) +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Store a value "bar" under the key "foo" for a cluster located "http://localhost:2379" community.general.etcd3: key: "foo" @@ -107,16 +111,16 @@ EXAMPLES = """ client_key: "/etc/ssl/private/key.pem" """ -RETURN = ''' +RETURN = r""" key: - description: The key that was queried - returned: always - type: str + description: The key that was queried. + returned: always + type: str old_value: - description: The previous value in the cluster - returned: always - type: str -''' + description: The previous value in the cluster. + returned: always + type: str +""" import traceback @@ -186,13 +190,8 @@ def run_module(): allowed_keys = ['host', 'port', 'ca_cert', 'cert_cert', 'cert_key', 'timeout', 'user', 'password'] - # TODO(evrardjp): Move this back to a dict comprehension when python 2.7 is - # the minimum supported version - # client_params = {key: value for key, value in module.params.items() if key in allowed_keys} - client_params = dict() - for key, value in module.params.items(): - if key in allowed_keys: - client_params[key] = value + + client_params = {key: value for key, value in module.params.items() if key in allowed_keys} try: etcd = etcd3.client(**client_params) except Exception as exp: diff --git a/plugins/modules/facter.py b/plugins/modules/facter.py deleted file mode 100644 index be45e3ce14..0000000000 --- a/plugins/modules/facter.py +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2012, Michael DeHaan -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' ---- -module: facter -short_description: Runs the discovery 
program I(facter) on the remote system -description: - - Runs the C(facter) discovery program - (U(https://github.com/puppetlabs/facter)) on the remote system, returning - JSON data that can be useful for inventory purposes. -options: - arguments: - description: - - Specifies arguments for facter. - type: list - elements: str -requirements: - - facter - - ruby-json -author: - - Ansible Core Team - - Michael DeHaan -''' - -EXAMPLES = ''' -# Example command-line invocation -# ansible www.example.net -m facter - -- name: Execute facter no arguments - community.general.facter: - -- name: Execute facter with arguments - community.general.facter: - arguments: - - -p - - system_uptime - - timezone - - is_virtual -''' -import json - -from ansible.module_utils.basic import AnsibleModule - - -def main(): - module = AnsibleModule( - argument_spec=dict( - arguments=dict(required=False, type='list', elements='str') - ) - ) - - facter_path = module.get_bin_path( - 'facter', - opt_dirs=['/opt/puppetlabs/bin']) - - cmd = [facter_path, "--json"] - if module.params['arguments']: - cmd += module.params['arguments'] - - rc, out, err = module.run_command(cmd, check_rc=True) - module.exit_json(**json.loads(out)) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/facter_facts.py b/plugins/modules/facter_facts.py new file mode 100644 index 0000000000..8ef5d7776b --- /dev/null +++ b/plugins/modules/facter_facts.py @@ -0,0 +1,86 @@ +#!/usr/bin/python + +# Copyright (c) 2023, Alexei Znamensky +# Copyright (c) 2012, Michael DeHaan +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: facter_facts +short_description: Runs the discovery program C(facter) on the remote system and return Ansible facts +version_added: 8.0.0 +description: + - Runs the C(facter) discovery program 
(U(https://github.com/puppetlabs/facter)) on the remote system, returning Ansible + facts from the JSON data that can be useful for inventory purposes. +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.facts + - community.general.attributes.facts_module +options: + arguments: + description: + - Specifies arguments for facter. + type: list + elements: str +requirements: + - facter + - ruby-json +author: + - Ansible Core Team + - Michael DeHaan +""" + +EXAMPLES = r""" +- name: Execute facter no arguments + community.general.facter_facts: + +- name: Execute facter with arguments + community.general.facter_facts: + arguments: + - -p + - system_uptime + - timezone + - is_virtual +""" + +RETURN = r""" +ansible_facts: + description: Dictionary with one key C(facter). + returned: always + type: dict + contains: + facter: + description: Dictionary containing facts discovered in the remote system. + returned: always + type: dict +""" + +import json + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict( + arguments=dict(type='list', elements='str'), + ), + supports_check_mode=True, + ) + + facter_path = module.get_bin_path( + 'facter', + opt_dirs=['/opt/puppetlabs/bin']) + + cmd = [facter_path, "--json"] + if module.params['arguments']: + cmd += module.params['arguments'] + + rc, out, err = module.run_command(cmd, check_rc=True) + module.exit_json(ansible_facts=dict(facter=json.loads(out))) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/filesize.py b/plugins/modules/filesize.py index a8c47d70fd..b0ef189143 100644 --- a/plugins/modules/filesize.py +++ b/plugins/modules/filesize.py @@ -1,30 +1,31 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2021, quidame # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from 
__future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: filesize short_description: Create a file with a given size, or resize it if it exists description: - - This module is a simple wrapper around C(dd) to create, extend or truncate - a file, given its size. It can be used to manage swap files (that require - contiguous blocks) or alternatively, huge sparse files. - + - This module is a simple wrapper around C(dd) to create, extend or truncate a file, given its size. It can be used to manage + swap files (that require contiguous blocks) or alternatively, huge sparse files. author: - quidame (@quidame) version_added: "3.0.0" +attributes: + check_mode: + support: full + diff_mode: + support: full + options: path: description: @@ -34,73 +35,58 @@ options: size: description: - Requested size of the file. - - The value is a number (either C(int) or C(float)) optionally followed - by a multiplicative suffix, that can be one of C(B) (bytes), C(KB) or - C(kB) (= 1000B), C(MB) or C(mB) (= 1000kB), C(GB) or C(gB) (= 1000MB), - and so on for C(T), C(P), C(E), C(Z) and C(Y); or alternatively one of - C(K), C(k) or C(KiB) (= 1024B); C(M), C(m) or C(MiB) (= 1024KiB); - C(G), C(g) or C(GiB) (= 1024MiB); and so on. - - If the multiplicative suffix is not provided, the value is treated as - an integer number of blocks of I(blocksize) bytes each (float values - are rounded to the closest integer). - - When the I(size) value is equal to the current file size, does nothing. - - When the I(size) value is bigger than the current file size, bytes from - I(source) (if I(sparse) is not C(false)) are appended to the file - without truncating it, in other words, without modifying the existing - bytes of the file. - - When the I(size) value is smaller than the current file size, it is - truncated to the requested value without modifying bytes before this - value. 
- - That means that a file of any arbitrary size can be grown to any other - arbitrary size, and then resized down to its initial size without - modifying its initial content. + - The value is a number (either C(int) or C(float)) optionally followed by a multiplicative suffix, that can be one + of V(B) (bytes), V(KB) or V(kB) (= 1000B), V(MB) or V(mB) (= 1000kB), V(GB) or V(gB) (= 1000MB), and so on for V(T), + V(P), V(E), V(Z) and V(Y); or alternatively one of V(K), V(k) or V(KiB) (= 1024B); V(M), V(m) or V(MiB) (= 1024KiB); + V(G), V(g) or V(GiB) (= 1024MiB); and so on. + - If the multiplicative suffix is not provided, the value is treated as an integer number of blocks of O(blocksize) + bytes each (float values are rounded to the closest integer). + - When the O(size) value is equal to the current file size, does nothing. + - When the O(size) value is bigger than the current file size, bytes from O(source) (if O(sparse) is not V(false)) are + appended to the file without truncating it, in other words, without modifying the existing bytes of the file. + - When the O(size) value is smaller than the current file size, it is truncated to the requested value without modifying + bytes before this value. + - That means that a file of any arbitrary size can be grown to any other arbitrary size, and then resized down to its + initial size without modifying its initial content. type: raw required: true blocksize: description: - Size of blocks, in bytes if not followed by a multiplicative suffix. - - The numeric value (before the unit) C(MUST) be an integer (or a C(float) - if it equals an integer). - - If not set, the size of blocks is guessed from the OS and commonly - results in C(512) or C(4096) bytes, that is used internally by the - module or when I(size) has no unit. + - The numeric value (before the unit) B(MUST) be an integer (or a C(float) if it equals an integer). 
+ - If not set, the size of blocks is guessed from the OS and commonly results in V(512) or V(4096) bytes, that is used + internally by the module or when O(size) has no unit. type: raw source: description: - Device or file that provides input data to provision the file. - - This parameter is ignored when I(sparse=true). + - This parameter is ignored when O(sparse=true). type: path default: /dev/zero force: description: - - Whether or not to overwrite the file if it exists, in other words, to - truncate it from 0. When C(true), the module is not idempotent, that - means it always reports I(changed=true). - - I(force=true) and I(sparse=true) are mutually exclusive. + - Whether or not to overwrite the file if it exists, in other words, to truncate it from 0. When V(true), the module + is not idempotent, that means it always reports C(changed=true). + - O(force=true) and O(sparse=true) are mutually exclusive. type: bool default: false sparse: description: - Whether or not the file to create should be a sparse file. - - This option is effective only on newly created files, or when growing a - file, only for the bytes to append. + - This option is effective only on newly created files, or when growing a file, only for the bytes to append. - This option is not supported on OSes or filesystems not supporting sparse files. - - I(force=true) and I(sparse=true) are mutually exclusive. + - O(force=true) and O(sparse=true) are mutually exclusive. type: bool default: false unsafe_writes: description: - - This option is silently ignored. This module always modifies file - size in-place. - -notes: - - This module supports C(check_mode) and C(diff). - + - This option is silently ignored. This module always modifies file size in-place. 
requirements: - dd (Data Duplicator) in PATH extends_documentation_fragment: - ansible.builtin.files + - community.general.attributes seealso: - name: dd(1) manpage for Linux @@ -134,9 +120,9 @@ seealso: - name: busybox(1) manpage for Linux description: Manual page of the GNU/Linux's busybox, that provides its own dd implementation. link: https://www.unix.com/man-page/linux/1/busybox -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a file of 1G filled with null bytes community.general.filesize: path: /var/bigfile @@ -179,9 +165,9 @@ EXAMPLES = r''' mode: u=rw,go= owner: root group: root -''' +""" -RETURN = r''' +RETURN = r""" cmd: description: Command executed to create or resize the file. type: str @@ -202,7 +188,7 @@ filesize: type: int sample: 1024 bytes: - description: Size of the file, in bytes, as the product of C(blocks) and C(blocksize). + description: Size of the file, in bytes, as the product of RV(filesize.blocks) and RV(filesize.blocksize). type: int sample: 512000 iec: @@ -225,7 +211,7 @@ path: type: str sample: /var/swap0 returned: always -''' +""" import re @@ -316,7 +302,7 @@ def split_size_unit(string, isint=False): Support optional space(s) between the numeric value and the unit. 
""" unit = re.sub(r'(\d|\.)', r'', string).strip() - value = float(re.sub(r'%s' % unit, r'', string).strip()) + value = float(re.sub(unit, r'', string).strip()) if isint and unit in ('B', ''): if int(value) != value: raise AssertionError("invalid blocksize value: bytes require an integer value") diff --git a/plugins/modules/filesystem.py b/plugins/modules/filesystem.py index 4dcfddee23..1477925de3 100644 --- a/plugins/modules/filesystem.py +++ b/plugins/modules/filesystem.py @@ -1,17 +1,14 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2021, quidame # Copyright (c) 2013, Alexander Bulimov # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" author: - Alexander Bulimov (@abulimov) - quidame (@quidame) @@ -19,81 +16,98 @@ module: filesystem short_description: Makes a filesystem description: - This module creates a filesystem. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: state: description: - - If I(state=present), the filesystem is created if it doesn't already - exist, that is the default behaviour if I(state) is omitted. - - If I(state=absent), filesystem signatures on I(dev) are wiped if it - contains a filesystem (as known by C(blkid)). - - When I(state=absent), all other options but I(dev) are ignored, and the - module doesn't fail if the device I(dev) doesn't actually exist. + - If O(state=present), the filesystem is created if it does not already exist, that is the default behaviour if O(state) + is omitted. + - If O(state=absent), filesystem signatures on O(dev) are wiped if it contains a filesystem (as known by C(blkid)). 
+ - When O(state=absent), all other options but O(dev) are ignored, and the module does not fail if the device O(dev) + does not actually exist. type: str - choices: [ present, absent ] + choices: [present, absent] default: present version_added: 1.3.0 fstype: - choices: [ btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap, ufs ] + choices: [bcachefs, btrfs, ext2, ext3, ext4, ext4dev, f2fs, lvm, ocfs2, reiserfs, xfs, vfat, swap, ufs] description: - - Filesystem type to be created. This option is required with - I(state=present) (or if I(state) is omitted). - - ufs support has been added in community.general 3.4.0. + - Filesystem type to be created. This option is required with O(state=present) (or if O(state) is omitted). + - V(ufs) support has been added in community.general 3.4.0. + - V(bcachefs) support has been added in community.general 8.6.0. type: str aliases: [type] dev: description: - - Target path to block device (Linux) or character device (FreeBSD) or - regular file (both). - - When setting Linux-specific filesystem types on FreeBSD, this module - only works when applying to regular files, aka disk images. - - Currently C(lvm) (Linux-only) and C(ufs) (FreeBSD-only) don't support - a regular file as their target I(dev). + - Target path to block device (Linux) or character device (FreeBSD) or regular file (both). + - When setting Linux-specific filesystem types on FreeBSD, this module only works when applying to regular files, also known as + disk images. + - Currently V(lvm) (Linux-only) and V(ufs) (FreeBSD-only) do not support a regular file as their target O(dev). - Support for character devices on FreeBSD has been added in community.general 3.4.0. type: path required: true aliases: [device] force: description: - - If C(true), allows to create new filesystem on devices that already has filesystem. + - If V(true), allows to create new filesystem on devices that already has filesystem. 
type: bool default: false resizefs: description: - - If C(true), if the block device and filesystem size differ, grow the filesystem into the space. - - Supported for C(btrfs), C(ext2), C(ext3), C(ext4), C(ext4dev), C(f2fs), C(lvm), C(xfs), C(ufs) and C(vfat) filesystems. - Attempts to resize other filesystem types will fail. - - XFS Will only grow if mounted. Currently, the module is based on commands - from C(util-linux) package to perform operations, so resizing of XFS is - not supported on FreeBSD systems. - - vFAT will likely fail if C(fatresize < 1.04). + - If V(true), if the block device and filesystem size differ, grow the filesystem into the space. + - >- + Supported when O(fstype) is one of: V(bcachefs), V(btrfs), V(ext2), V(ext3), V(ext4), V(ext4dev), V(f2fs), V(lvm), V(xfs), V(ufs) and V(vfat). + Attempts to resize other filesystem types fail. + - XFS only grows if mounted. Currently, the module is based on commands from C(util-linux) package to perform operations, + so resizing of XFS is not supported on FreeBSD systems. + - VFAT is likely to fail if C(fatresize < 1.04). + - Mutually exclusive with O(uuid). type: bool default: false opts: description: - List of options to be passed to C(mkfs) command. type: str + uuid: + description: + - Set filesystem's UUID to the given value. + - The UUID options specified in O(opts) take precedence over this value. + - See xfs_admin(8) (C(xfs)), tune2fs(8) (C(ext2), C(ext3), C(ext4), C(ext4dev)) for possible values. + - For O(fstype=lvm) the value is ignored, it resets the PV UUID if set. + - Supported for O(fstype) being one of V(bcachefs), V(ext2), V(ext3), V(ext4), V(ext4dev), V(lvm), or V(xfs). + - This is B(not idempotent). Specifying this option always results in a change. + - Mutually exclusive with O(resizefs). + type: str + version_added: 7.1.0 requirements: - - Uses specific tools related to the I(fstype) for creating or resizing a - filesystem (from packages e2fsprogs, xfsprogs, dosfstools, and so on). 
- - Uses generic tools mostly related to the Operating System (Linux or - FreeBSD) or available on both, as C(blkid). + - Uses specific tools related to the O(fstype) for creating or resizing a filesystem (from packages e2fsprogs, xfsprogs, + dosfstools, and so on). + - Uses generic tools mostly related to the Operating System (Linux or FreeBSD) or available on both, as C(blkid). - On FreeBSD, either C(util-linux) or C(e2fsprogs) package is required. notes: - - Potential filesystems on I(dev) are checked using C(blkid). In case C(blkid) - is unable to detect a filesystem (and in case C(fstyp) on FreeBSD is also - unable to detect a filesystem), this filesystem is overwritten even if - I(force) is C(false). - - On FreeBSD systems, both C(e2fsprogs) and C(util-linux) packages provide - a C(blkid) command that is compatible with this module. However, these - packages conflict with each other, and only the C(util-linux) package - provides the command required to not fail when I(state=absent). - - This module supports I(check_mode). + - Potential filesystems on O(dev) are checked using C(blkid). In case C(blkid) is unable to detect a filesystem (and in + case C(fstyp) on FreeBSD is also unable to detect a filesystem), this filesystem is overwritten even if O(force=false). + - On FreeBSD systems, both C(e2fsprogs) and C(util-linux) packages provide a C(blkid) command that is compatible with this + module. However, these packages conflict with each other, and only the C(util-linux) package provides the command required + to not fail when O(state=absent). seealso: - module: community.general.filesize - module: ansible.posix.mount -''' + - name: xfs_admin(8) manpage for Linux + description: Manual page of the GNU/Linux's xfs_admin implementation. + link: https://man7.org/linux/man-pages/man8/xfs_admin.8.html + - name: tune2fs(8) manpage for Linux + description: Manual page of the GNU/Linux's tune2fs implementation. 
+ link: https://man7.org/linux/man-pages/man8/tune2fs.8.html +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a ext2 filesystem on /dev/sdb1 community.general.filesystem: fstype: ext2 @@ -114,7 +128,25 @@ EXAMPLES = ''' community.general.filesystem: dev: /path/to/disk.img fstype: vfat -''' + +- name: Reset an xfs filesystem UUID on /dev/sdb1 + community.general.filesystem: + fstype: xfs + dev: /dev/sdb1 + uuid: generate + +- name: Reset an ext4 filesystem UUID on /dev/sdb1 + community.general.filesystem: + fstype: ext4 + dev: /dev/sdb1 + uuid: random + +- name: Reset an LVM filesystem (PV) UUID on /dev/sdc + community.general.filesystem: + fstype: lvm + dev: /dev/sdc + uuid: random +""" import os import platform @@ -172,10 +204,15 @@ class Filesystem(object): MKFS = None MKFS_FORCE_FLAGS = [] + MKFS_SET_UUID_OPTIONS = None + MKFS_SET_UUID_EXTRA_OPTIONS = [] INFO = None GROW = None GROW_MAX_SPACE_FLAGS = [] GROW_MOUNTPOINT_ONLY = False + CHANGE_UUID = None + CHANGE_UUID_OPTION = None + CHANGE_UUID_OPTION_HAS_ARG = True LANG_ENV = {'LANG': 'C', 'LC_ALL': 'C', 'LC_MESSAGES': 'C'} @@ -194,13 +231,19 @@ class Filesystem(object): """ raise NotImplementedError() - def create(self, opts, dev): + def create(self, opts, dev, uuid=None): if self.module.check_mode: return + if uuid and self.MKFS_SET_UUID_OPTIONS: + if not (set(self.MKFS_SET_UUID_OPTIONS) & set(opts)): + opts += [self.MKFS_SET_UUID_OPTIONS[0], uuid] + self.MKFS_SET_UUID_EXTRA_OPTIONS + mkfs = self.module.get_bin_path(self.MKFS, required=True) cmd = [mkfs] + self.MKFS_FORCE_FLAGS + opts + [str(dev)] self.module.run_command(cmd, check_rc=True) + if uuid and self.CHANGE_UUID and self.MKFS_SET_UUID_OPTIONS is None: + self.change_uuid(new_uuid=uuid, dev=dev) def wipefs(self, dev): if self.module.check_mode: @@ -249,11 +292,31 @@ class Filesystem(object): dummy, out, dummy = self.module.run_command(self.grow_cmd(grow_target), check_rc=True) return out + def change_uuid_cmd(self, new_uuid, target): + """Build and 
return the UUID change command line as list.""" + cmdline = [self.module.get_bin_path(self.CHANGE_UUID, required=True)] + if self.CHANGE_UUID_OPTION_HAS_ARG: + cmdline += [self.CHANGE_UUID_OPTION, new_uuid, target] + else: + cmdline += [self.CHANGE_UUID_OPTION, target] + return cmdline + + def change_uuid(self, new_uuid, dev): + """Change filesystem UUID. Returns stdout of used command""" + if self.module.check_mode: + self.module.exit_json(change=True, msg='Changing %s filesystem UUID on device %s' % (self.fstype, dev)) + + dummy, out, dummy = self.module.run_command(self.change_uuid_cmd(new_uuid=new_uuid, target=str(dev)), check_rc=True) + return out + class Ext(Filesystem): MKFS_FORCE_FLAGS = ['-F'] + MKFS_SET_UUID_OPTIONS = ['-U'] INFO = 'tune2fs' GROW = 'resize2fs' + CHANGE_UUID = 'tune2fs' + CHANGE_UUID_OPTION = "-U" def get_fs_size(self, dev): """Get Block count and Block size and return their product.""" @@ -292,6 +355,8 @@ class XFS(Filesystem): INFO = 'xfs_info' GROW = 'xfs_growfs' GROW_MOUNTPOINT_ONLY = True + CHANGE_UUID = "xfs_admin" + CHANGE_UUID_OPTION = "-U" def get_fs_size(self, dev): """Get bsize and blocks and return their product.""" @@ -330,6 +395,48 @@ class Reiserfs(Filesystem): MKFS_FORCE_FLAGS = ['-q'] +class Bcachefs(Filesystem): + MKFS = 'mkfs.bcachefs' + MKFS_FORCE_FLAGS = ['--force'] + MKFS_SET_UUID_OPTIONS = ['-U', '--uuid'] + INFO = 'bcachefs' + GROW = 'bcachefs' + GROW_MAX_SPACE_FLAGS = ['device', 'resize'] + + def get_fs_size(self, dev): + """Return size in bytes of filesystem on device (integer).""" + dummy, stdout, dummy = self.module.run_command([self.module.get_bin_path(self.INFO), + 'show-super', str(dev)], check_rc=True) + + for line in stdout.splitlines(): + if "Size: " in line: + parts = line.split() + unit = parts[2] + + base = None + exp = None + + units_2 = ["B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"] + units_10 = ["B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"] + + try: + exp = units_2.index(unit) + 
base = 1024 + except ValueError: + exp = units_10.index(unit) + base = 1000 + + if exp == 0: + value = int(parts[1]) + else: + value = float(parts[1]) + + if base is not None and exp is not None: + return int(value * pow(base, exp)) + + raise ValueError(repr(stdout)) + + class Btrfs(Filesystem): MKFS = 'mkfs.btrfs' INFO = 'btrfs' @@ -445,8 +552,13 @@ class VFAT(Filesystem): class LVM(Filesystem): MKFS = 'pvcreate' MKFS_FORCE_FLAGS = ['-f'] + MKFS_SET_UUID_OPTIONS = ['-u', '--uuid'] + MKFS_SET_UUID_EXTRA_OPTIONS = ['--norestorefile'] INFO = 'pvs' GROW = 'pvresize' + CHANGE_UUID = 'pvchange' + CHANGE_UUID_OPTION = '-u' + CHANGE_UUID_OPTION_HAS_ARG = False def get_fs_size(self, dev): """Get and return PV size, in bytes.""" @@ -487,6 +599,7 @@ class UFS(Filesystem): FILESYSTEMS = { + 'bcachefs': Bcachefs, 'ext2': Ext2, 'ext3': Ext3, 'ext4': Ext4, @@ -519,10 +632,14 @@ def main(): opts=dict(type='str'), force=dict(type='bool', default=False), resizefs=dict(type='bool', default=False), + uuid=dict(type='str'), ), required_if=[ ('state', 'present', ['fstype']) ], + mutually_exclusive=[ + ('resizefs', 'uuid'), + ], supports_check_mode=True, ) @@ -532,6 +649,7 @@ def main(): opts = module.params['opts'] force = module.params['force'] resizefs = module.params['resizefs'] + uuid = module.params['uuid'] mkfs_opts = [] if opts is not None: @@ -570,21 +688,30 @@ def main(): filesystem = klass(module) + if uuid and not (filesystem.CHANGE_UUID or filesystem.MKFS_SET_UUID_OPTIONS): + module.fail_json(changed=False, msg="module does not support UUID option for this filesystem (%s) yet." % fstype) + same_fs = fs and FILESYSTEMS.get(fs) == FILESYSTEMS[fstype] - if same_fs and not resizefs and not force: + if same_fs and not resizefs and not uuid and not force: module.exit_json(changed=False) - elif same_fs and resizefs: - if not filesystem.GROW: - module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." 
% fstype) + elif same_fs: + if resizefs: + if not filesystem.GROW: + module.fail_json(changed=False, msg="module does not support resizing %s filesystem yet." % fstype) - out = filesystem.grow(dev) + out = filesystem.grow(dev) - module.exit_json(changed=True, msg=out) + module.exit_json(changed=True, msg=out) + elif uuid: + + out = filesystem.change_uuid(new_uuid=uuid, dev=dev) + + module.exit_json(changed=True, msg=out) elif fs and not force: module.fail_json(msg="'%s' is already used as %s, use force=true to overwrite" % (dev, fs), rc=rc, err=err) # create fs - filesystem.create(mkfs_opts, dev) + filesystem.create(opts=mkfs_opts, dev=dev, uuid=uuid) changed = True elif fs: diff --git a/plugins/modules/flatpak.py b/plugins/modules/flatpak.py index d6264a50d3..3fab8f820b 100644 --- a/plugins/modules/flatpak.py +++ b/plugins/modules/flatpak.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017 John Kwiatkoski (@JayKayy) # Copyright (c) 2018 Alexander Bethke (@oolongbrothers) @@ -7,83 +6,85 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: flatpak short_description: Manage flatpaks description: -- Allows users to add or remove flatpaks. -- See the M(community.general.flatpak_remote) module for managing flatpak remotes. + - Allows users to add or remove flatpaks. + - See the M(community.general.flatpak_remote) module for managing flatpak remotes. 
author: -- John Kwiatkoski (@JayKayy) -- Alexander Bethke (@oolongbrothers) + - John Kwiatkoski (@JayKayy) + - Alexander Bethke (@oolongbrothers) requirements: -- flatpak + - flatpak +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: partial + details: + - If O(state=latest), the module always returns RV(ignore:changed=true). + diff_mode: + support: none options: executable: description: - - The path to the C(flatpak) executable to use. - - By default, this module looks for the C(flatpak) executable on the path. + - The path to the C(flatpak) executable to use. + - By default, this module looks for the C(flatpak) executable on the path. type: path default: flatpak method: description: - - The installation method to use. - - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system) - or only for the current C(user). + - The installation method to use. + - Defines if the C(flatpak) is supposed to be installed globally for the whole V(system) or only for the current V(user). type: str - choices: [ system, user ] + choices: [system, user] default: system name: description: - - The name of the flatpak to manage. To operate on several packages this - can accept a list of packages. - - When used with I(state=present), I(name) can be specified as a URL to a - C(flatpakref) file or the unique reverse DNS name that identifies a flatpak. - - Both C(https://) and C(http://) URLs are supported. - - When supplying a reverse DNS name, you can use the I(remote) option to specify on what remote - to look for the flatpak. An example for a reverse DNS name is C(org.gnome.gedit). - - When used with I(state=absent), it is recommended to specify the name in the reverse DNS - format. - - When supplying a URL with I(state=absent), the module will try to match the - installed flatpak based on the name of the flatpakref to remove it. 
However, there is no - guarantee that the names of the flatpakref file and the reverse DNS name of the installed - flatpak do match. + - The name of the flatpak to manage. To operate on several packages this can accept a list of packages. + - When used with O(state=present), O(name) can be specified as a URL to a C(flatpakref) file or the unique reverse DNS + name that identifies a flatpak. + - Both C(https://) and C(http://) URLs are supported. + - When supplying a reverse DNS name, you can use the O(remote) option to specify on what remote to look for the flatpak. + An example for a reverse DNS name is C(org.gnome.gedit). + - When used with O(state=absent) or O(state=latest), it is recommended to specify the name in the reverse DNS format. + - When supplying a URL with O(state=absent) or O(state=latest), the module tries to match the installed flatpak based + on the name of the flatpakref to remove or update it. However, there is no guarantee that the names of the flatpakref + file and the reverse DNS name of the installed flatpak do match. type: list elements: str required: true no_dependencies: description: - - If installing runtime dependencies should be omitted or not - - This parameter is primarily implemented for integration testing this module. - There might however be some use cases where you would want to have this, like when you are - packaging your own flatpaks. + - If installing runtime dependencies should be omitted or not. + - This parameter is primarily implemented for integration testing this module. There might however be some use cases + where you would want to have this, like when you are packaging your own flatpaks. type: bool default: false version_added: 3.2.0 remote: description: - - The flatpak remote (repository) to install the flatpak from. - - By default, C(flathub) is assumed, but you do need to add the flathub flatpak_remote before - you can use this. - - See the M(community.general.flatpak_remote) module for managing flatpak remotes. 
+ - The flatpak remote (repository) to install the flatpak from. + - By default, V(flathub) is assumed, but you do need to add the flathub flatpak_remote before you can use this. + - See the M(community.general.flatpak_remote) module for managing flatpak remotes. type: str default: flathub state: description: - - Indicates the desired package state. - choices: [ absent, present ] + - Indicates the desired package state. + - The value V(latest) is supported since community.general 8.6.0. + choices: [absent, present, latest] type: str default: present -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Install the spotify flatpak community.general.flatpak: - name: https://s3.amazonaws.com/alexlarsson/spotify-repo/spotify.flatpakref + name: https://s3.amazonaws.com/alexlarsson/spotify-repo/spotify.flatpakref state: present - name: Install the gedit flatpak package without dependencies (not recommended) @@ -104,6 +105,12 @@ EXAMPLES = r''' state: present remote: gnome +- name: Install GIMP using custom flatpak binary path + community.general.flatpak: + name: org.gimp.GIMP + state: present + executable: /usr/local/bin/flatpak-dev + - name: Install multiple packages community.general.flatpak: name: @@ -111,6 +118,37 @@ EXAMPLES = r''' - org.inkscape.Inkscape - org.mozilla.firefox +- name: Update the spotify flatpak + community.general.flatpak: + name: https://s3.amazonaws.com/alexlarsson/spotify-repo/spotify.flatpakref + state: latest + +- name: Update the gedit flatpak package without dependencies (not recommended) + community.general.flatpak: + name: https://git.gnome.org/browse/gnome-apps-nightly/plain/gedit.flatpakref + state: latest + no_dependencies: true + +- name: Update the gedit package from flathub for current user + community.general.flatpak: + name: org.gnome.gedit + state: latest + method: user + +- name: Update the Gnome Calendar flatpak from the gnome remote system-wide + community.general.flatpak: + name: org.gnome.Calendar + state: latest + remote: 
gnome + +- name: Update multiple packages + community.general.flatpak: + name: + - org.gimp.GIMP + - org.inkscape.Inkscape + - org.mozilla.firefox + state: latest + - name: Remove the gedit flatpak community.general.flatpak: name: org.gnome.gedit @@ -123,37 +161,18 @@ EXAMPLES = r''' - org.inkscape.Inkscape - org.mozilla.firefox state: absent -''' +""" -RETURN = r''' +RETURN = r""" command: - description: The exact flatpak command that was executed + description: The exact flatpak command that was executed. returned: When a flatpak command has been executed type: str sample: "/usr/bin/flatpak install --user --nontinteractive flathub org.gnome.Calculator" -msg: - description: Module error message - returned: failure - type: str - sample: "Executable '/usr/local/bin/flatpak' was not found on the system." -rc: - description: Return code from flatpak binary - returned: When a flatpak command has been executed - type: int - sample: 0 -stderr: - description: Error output from flatpak binary - returned: When a flatpak command has been executed - type: str - sample: "error: Error searching remote flathub: Can't find ref org.gnome.KDE" -stdout: - description: Output from flatpak binary - returned: When a flatpak command has been executed - type: str - sample: "org.gnome.Calendar/x86_64/stable\tcurrent\norg.gnome.gitg/x86_64/stable\tcurrent\n" -''' +""" + +from urllib.parse import urlparse -from ansible.module_utils.six.moves.urllib.parse import urlparse from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.version import LooseVersion @@ -188,6 +207,28 @@ def install_flat(module, binary, remote, names, method, no_dependencies): result['changed'] = True +def update_flat(module, binary, names, method, no_dependencies): + """Update existing flatpaks.""" + global result # pylint: disable=global-variable-not-assigned + installed_flat_names = [ + _match_installed_flat_name(module, binary, name, method) + for name in 
names + ] + command = [binary, "update", "--{0}".format(method)] + flatpak_version = _flatpak_version(module, binary) + if LooseVersion(flatpak_version) < LooseVersion('1.1.3'): + command += ["-y"] + else: + command += ["--noninteractive"] + if no_dependencies: + command += ["--no-deps"] + command += installed_flat_names + stdout = _flatpak_command(module, module.check_mode, command) + result["changed"] = ( + True if module.check_mode else stdout.find("Nothing to do.") == -1 + ) + + def uninstall_flat(module, binary, names, method): """Remove existing flatpaks.""" global result # pylint: disable=global-variable-not-assigned @@ -208,7 +249,7 @@ def uninstall_flat(module, binary, names, method): def flatpak_exists(module, binary, names, method): """Check if the flatpaks are installed.""" - command = [binary, "list", "--{0}".format(method), "--app"] + command = [binary, "list", "--{0}".format(method)] output = _flatpak_command(module, False, command) installed = [] not_installed = [] @@ -266,13 +307,39 @@ def _match_flat_using_flatpak_column_feature(module, binary, parsed_name, method return row.split()[0] +def _is_flatpak_id(part): + # For guidelines on application IDs, refer to the following resources: + # Flatpak: + # https://docs.flatpak.org/en/latest/conventions.html#application-ids + # Flathub: + # https://docs.flathub.org/docs/for-app-authors/requirements#application-id + if '.' 
not in part: + return False + sections = part.split('.') + if len(sections) < 2: + return False + domain = sections[0] + if not domain.islower(): + return False + for section in sections[1:]: + if not section.isalnum(): + return False + return True + + def _parse_flatpak_name(name): if name.startswith('http://') or name.startswith('https://'): file_name = urlparse(name).path.split('/')[-1] file_name_without_extension = file_name.split('.')[0:-1] common_name = ".".join(file_name_without_extension) else: - common_name = name + parts = name.split('/') + for part in parts: + if _is_flatpak_id(part): + common_name = part + break + else: + common_name = name return common_name @@ -306,7 +373,7 @@ def main(): method=dict(type='str', default='system', choices=['user', 'system']), state=dict(type='str', default='present', - choices=['absent', 'present']), + choices=['absent', 'present', 'latest']), no_dependencies=dict(type='bool', default=False), executable=dict(type='path', default='flatpak') ), @@ -330,11 +397,16 @@ def main(): if not binary: module.fail_json(msg="Executable '%s' was not found on the system." 
% executable, **result) + module.run_command_environ_update = dict(LANGUAGE='C', LC_ALL='C') + installed, not_installed = flatpak_exists(module, binary, name, method) - if state == 'present' and not_installed: - install_flat(module, binary, remote, not_installed, method, no_dependencies) - elif state == 'absent' and installed: + if state == 'absent' and installed: uninstall_flat(module, binary, installed, method) + else: + if state == 'latest' and installed: + update_flat(module, binary, installed, method, no_dependencies) + if state in ('present', 'latest') and not_installed: + install_flat(module, binary, remote, not_installed, method, no_dependencies) module.exit_json(**result) diff --git a/plugins/modules/flatpak_remote.py b/plugins/modules/flatpak_remote.py index bc52d5461a..891942143d 100644 --- a/plugins/modules/flatpak_remote.py +++ b/plugins/modules/flatpak_remote.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017 John Kwiatkoski (@JayKayy) # Copyright (c) 2018 Alexander Bethke (@oolongbrothers) @@ -7,65 +6,73 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: flatpak_remote short_description: Manage flatpak repository remotes description: -- Allows users to add or remove flatpak remotes. -- The flatpak remotes concept is comparable to what is called repositories in other packaging - formats. -- Currently, remote addition is only supported via I(flatpakrepo) file URLs. -- Existing remotes will not be updated. -- See the M(community.general.flatpak) module for managing flatpaks. + - Allows users to add or remove flatpak remotes. + - The flatpak remotes concept is comparable to what is called repositories in other packaging formats. 
+ - Currently, remote addition is only supported using C(flatpakrepo) file URLs. + - Existing remotes are not updated. + - See the M(community.general.flatpak) module for managing flatpaks. author: -- John Kwiatkoski (@JayKayy) -- Alexander Bethke (@oolongbrothers) + - John Kwiatkoski (@JayKayy) + - Alexander Bethke (@oolongbrothers) requirements: -- flatpak + - flatpak +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: executable: description: - - The path to the C(flatpak) executable to use. - - By default, this module looks for the C(flatpak) executable on the path. + - The path to the C(flatpak) executable to use. + - By default, this module looks for the C(flatpak) executable on the path. type: str default: flatpak flatpakrepo_url: description: - - The URL to the I(flatpakrepo) file representing the repository remote to add. - - When used with I(state=present), the flatpak remote specified under the I(flatpakrepo_url) - is added using the specified installation C(method). - - When used with I(state=absent), this is not required. - - Required when I(state=present). + - The URL to the C(flatpakrepo) file representing the repository remote to add. + - When used with O(state=present), the flatpak remote specified under the O(flatpakrepo_url) is added using the specified + installation O(method). + - When used with O(state=absent), this is not required. + - Required when O(state=present). type: str method: description: - - The installation method to use. - - Defines if the I(flatpak) is supposed to be installed globally for the whole C(system) - or only for the current C(user). + - The installation method to use. + - Defines if the C(flatpak) is supposed to be installed globally for the whole V(system) or only for the current V(user). 
type: str - choices: [ system, user ] + choices: [system, user] default: system name: description: - - The desired name for the flatpak remote to be registered under on the managed host. - - When used with I(state=present), the remote will be added to the managed host under - the specified I(name). - - When used with I(state=absent) the remote with that name will be removed. + - The desired name for the flatpak remote to be registered under on the managed host. + - When used with O(state=present), the remote is added to the managed host under the specified O(name). + - When used with O(state=absent) the remote with that name is removed. type: str required: true state: description: - - Indicates the desired package state. + - Indicates the desired package state. type: str - choices: [ absent, present ] + choices: [absent, present] default: present -''' + enabled: + description: + - Indicates whether this remote is enabled. + type: bool + default: true + version_added: 6.4.0 +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Add the Gnome flatpak remote to the system installation community.general.flatpak_remote: name: gnome @@ -89,35 +96,21 @@ EXAMPLES = r''' community.general.flatpak_remote: name: flathub state: absent -''' -RETURN = r''' +- name: Disable the flathub remote in the system installation + community.general.flatpak_remote: + name: flathub + state: present + enabled: false +""" + +RETURN = r""" command: - description: The exact flatpak command that was executed + description: The exact flatpak command that was executed. returned: When a flatpak command has been executed type: str sample: "/usr/bin/flatpak remote-add --system flatpak-test https://dl.flathub.org/repo/flathub.flatpakrepo" -msg: - description: Module error message - returned: failure - type: str - sample: "Executable '/usr/local/bin/flatpak' was not found on the system." 
-rc: - description: Return code from flatpak binary - returned: When a flatpak command has been executed - type: int - sample: 0 -stderr: - description: Error output from flatpak binary - returned: When a flatpak command has been executed - type: str - sample: "error: GPG verification enabled, but no summary found (check that the configured URL in remote config is correct)\n" -stdout: - description: Output from flatpak binary - returned: When a flatpak command has been executed - type: str - sample: "flathub\tFlathub\thttps://dl.flathub.org/repo/\t1\t\n" -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_bytes, to_native @@ -141,7 +134,7 @@ def remove_remote(module, binary, name, method): def remote_exists(module, binary, name, method): """Check if the remote exists.""" - command = [binary, "remote-list", "-d", "--{0}".format(method)] + command = [binary, "remote-list", "--show-disabled", "--{0}".format(method)] # The query operation for the remote needs to be run even in check mode output = _flatpak_command(module, False, command) for line in output.splitlines(): @@ -153,6 +146,36 @@ def remote_exists(module, binary, name, method): return False +def enable_remote(module, binary, name, method): + """Enable a remote.""" + global result # pylint: disable=global-variable-not-assigned + command = [binary, "remote-modify", "--enable", "--{0}".format(method), name] + _flatpak_command(module, module.check_mode, command) + result['changed'] = True + + +def disable_remote(module, binary, name, method): + """Disable a remote.""" + global result # pylint: disable=global-variable-not-assigned + command = [binary, "remote-modify", "--disable", "--{0}".format(method), name] + _flatpak_command(module, module.check_mode, command) + result['changed'] = True + + +def remote_enabled(module, binary, name, method): + """Check if the remote is enabled.""" + command = [binary, "remote-list", "--show-disabled", 
"--{0}".format(method)] + # The query operation for the remote needs to be run even in check mode + output = _flatpak_command(module, False, command) + for line in output.splitlines(): + listed_remote = line.split() + if len(listed_remote) == 0: + continue + if listed_remote[0] == to_native(name): + return len(listed_remote) == 1 or "disabled" not in listed_remote[1].split(",") + return False + + def _flatpak_command(module, noop, command): global result # pylint: disable=global-variable-not-assigned result['command'] = ' '.join(command) @@ -175,6 +198,7 @@ def main(): choices=['user', 'system']), state=dict(type='str', default="present", choices=['absent', 'present']), + enabled=dict(type='bool', default=True), executable=dict(type='str', default="flatpak") ), # This module supports check mode @@ -185,6 +209,7 @@ def main(): flatpakrepo_url = module.params['flatpakrepo_url'] method = module.params['method'] state = module.params['state'] + enabled = module.params['enabled'] executable = module.params['executable'] binary = module.get_bin_path(executable, None) @@ -207,6 +232,14 @@ def main(): elif state == 'absent' and remote_already_exists: remove_remote(module, binary, name, method) + if state == 'present': + remote_already_enabled = remote_enabled(module, binary, to_bytes(name), method) + + if enabled and not remote_already_enabled: + enable_remote(module, binary, name, method) + if not enabled and remote_already_enabled: + disable_remote(module, binary, name, method) + module.exit_json(**result) diff --git a/plugins/modules/flowdock.py b/plugins/modules/flowdock.py deleted file mode 100644 index 965ae62d7a..0000000000 --- a/plugins/modules/flowdock.py +++ /dev/null @@ -1,198 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2013 Matt Coddington -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import 
absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: flowdock -author: "Matt Coddington (@mcodd)" -short_description: Send a message to a flowdock -description: - - Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat) -options: - token: - type: str - description: - - API token. - required: true - type: - type: str - description: - - Whether to post to 'inbox' or 'chat' - required: true - choices: [ "inbox", "chat" ] - msg: - type: str - description: - - Content of the message - required: true - tags: - type: str - description: - - tags of the message, separated by commas - required: false - external_user_name: - type: str - description: - - (chat only - required) Name of the "user" sending the message - required: false - from_address: - type: str - description: - - (inbox only - required) Email address of the message sender - required: false - source: - type: str - description: - - (inbox only - required) Human readable identifier of the application that uses the Flowdock API - required: false - subject: - type: str - description: - - (inbox only - required) Subject line of the message - required: false - from_name: - type: str - description: - - (inbox only) Name of the message sender - required: false - reply_to: - type: str - description: - - (inbox only) Email address for replies - required: false - project: - type: str - description: - - (inbox only) Human readable identifier for more detailed message categorization - required: false - link: - type: str - description: - - (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox. - required: false - validate_certs: - description: - - If C(false), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. 
- required: false - default: true - type: bool - -requirements: [ ] -''' - -EXAMPLES = ''' -- name: Send a message to a flowdock - community.general.flowdock: - type: inbox - token: AAAAAA - from_address: user@example.com - source: my cool app - msg: test from ansible - subject: test subject - -- name: Send a message to a flowdock - community.general.flowdock: - type: chat - token: AAAAAA - external_user_name: testuser - msg: test from ansible - tags: tag1,tag2,tag3 -''' - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import fetch_url - - -# =========================================== -# Module execution. -# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - token=dict(required=True, no_log=True), - msg=dict(required=True), - type=dict(required=True, choices=["inbox", "chat"]), - external_user_name=dict(required=False), - from_address=dict(required=False), - source=dict(required=False), - subject=dict(required=False), - from_name=dict(required=False), - reply_to=dict(required=False), - project=dict(required=False), - tags=dict(required=False), - link=dict(required=False), - validate_certs=dict(default=True, type='bool'), - ), - supports_check_mode=True - ) - - type = module.params["type"] - token = module.params["token"] - if type == 'inbox': - url = "https://api.flowdock.com/v1/messages/team_inbox/%s" % (token) - else: - url = "https://api.flowdock.com/v1/messages/chat/%s" % (token) - - params = {} - - # required params - params['content'] = module.params["msg"] - - # required params for the 'chat' type - if module.params['external_user_name']: - if type == 'inbox': - module.fail_json(msg="external_user_name is not valid for the 'inbox' type") - else: - params['external_user_name'] = module.params["external_user_name"] - elif type == 'chat': - module.fail_json(msg="external_user_name is required for the 'chat' type") - - # required params for 
the 'inbox' type - for item in ['from_address', 'source', 'subject']: - if module.params[item]: - if type == 'chat': - module.fail_json(msg="%s is not valid for the 'chat' type" % item) - else: - params[item] = module.params[item] - elif type == 'inbox': - module.fail_json(msg="%s is required for the 'inbox' type" % item) - - # optional params - if module.params["tags"]: - params['tags'] = module.params["tags"] - - # optional params for the 'inbox' type - for item in ['from_name', 'reply_to', 'project', 'link']: - if module.params[item]: - if type == 'chat': - module.fail_json(msg="%s is not valid for the 'chat' type" % item) - else: - params[item] = module.params[item] - - # If we're in check mode, just exit pretending like we succeeded - if module.check_mode: - module.exit_json(changed=False) - - # Send the data to Flowdock - data = urlencode(params) - response, info = fetch_url(module, url, data=data) - if info['status'] != 200: - module.fail_json(msg="unable to send msg: %s" % info['msg']) - - module.exit_json(changed=True, msg=module.params["msg"]) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/gandi_livedns.py b/plugins/modules/gandi_livedns.py index 9566661f41..0d6f93529d 100644 --- a/plugins/modules/gandi_livedns.py +++ b/plugins/modules/gandi_livedns.py @@ -1,43 +1,54 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2019 Gregory Thiemonge # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: gandi_livedns author: -- Gregory Thiemonge (@gthiemonge) + - Gregory Thiemonge (@gthiemonge) version_added: "2.3.0" short_description: Manage Gandi LiveDNS records description: -- "Manages DNS records by the Gandi LiveDNS API, see the docs: 
U(https://doc.livedns.gandi.net/)." + - 'Manages DNS records by the Gandi LiveDNS API, see the docs: U(https://doc.livedns.gandi.net/).' +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: + personal_access_token: + description: + - Scoped API token. + - One of O(personal_access_token) and O(api_key) must be specified. + type: str + version_added: 9.0.0 api_key: description: - - Account API token. + - Account API token. + - Note that these type of keys are deprecated and might stop working at some point. Use personal access tokens instead. + - One of O(personal_access_token) and O(api_key) must be specified. type: str - required: true record: description: - - Record to add. + - Record to add. type: str required: true state: description: - - Whether the record(s) should exist or not. + - Whether the record(s) should exist or not. type: str - choices: [ absent, present ] + choices: [absent, present] default: present ttl: description: - - The TTL to give the new record. - - Required when I(state=present). + - The TTL to give the new record. + - Required when O(state=present). type: int type: description: @@ -46,29 +57,27 @@ options: required: true values: description: - - The record values. - - Required when I(state=present). + - The record values. + - Required when O(state=present). type: list elements: str domain: description: - - The name of the Domain to work with (for example, "example.com"). + - The name of the Domain to work with (for example, V(example.com)). required: true type: str -notes: -- Supports C(check_mode). 
-''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a test A record to point to 127.0.0.1 in the my.com domain community.general.gandi_livedns: domain: my.com record: test type: A values: - - 127.0.0.1 + - 127.0.0.1 ttl: 7200 - api_key: dummyapitoken + personal_access_token: dummytoken register: record - name: Create a mail CNAME record to www.my.com domain @@ -77,9 +86,9 @@ EXAMPLES = r''' type: CNAME record: mail values: - - www + - www ttl: 7200 - api_key: dummyapitoken + personal_access_token: dummytoken state: present - name: Change its TTL @@ -88,9 +97,9 @@ EXAMPLES = r''' type: CNAME record: mail values: - - www + - www ttl: 10800 - api_key: dummyapitoken + personal_access_token: dummytoken state: present - name: Delete the record @@ -98,45 +107,55 @@ EXAMPLES = r''' domain: my.com type: CNAME record: mail - api_key: dummyapitoken + personal_access_token: dummytoken state: absent -''' -RETURN = r''' +- name: Use a (deprecated) API Key + community.general.gandi_livedns: + domain: my.com + record: test + type: A + values: + - 127.0.0.1 + ttl: 7200 + api_key: dummyapikey +""" + +RETURN = r""" record: - description: A dictionary containing the record data. - returned: success, except on record deletion - type: dict - contains: - values: - description: The record content (details depend on record type). - returned: success - type: list - elements: str - sample: - - 192.0.2.91 - - 192.0.2.92 - record: - description: The record name. - returned: success - type: str - sample: www - ttl: - description: The time-to-live for the record. - returned: success - type: int - sample: 300 - type: - description: The record type. - returned: success - type: str - sample: A - domain: - description: The domain associated with the record. - returned: success - type: str - sample: my.com -''' + description: A dictionary containing the record data. 
+ returned: success, except on record deletion + type: dict + contains: + values: + description: The record content (details depend on record type). + returned: success + type: list + elements: str + sample: + - 192.0.2.91 + - 192.0.2.92 + record: + description: The record name. + returned: success + type: str + sample: www + ttl: + description: The time-to-live for the record. + returned: success + type: int + sample: 300 + type: + description: The record type. + returned: success + type: str + sample: A + domain: + description: The domain associated with the record. + returned: success + type: str + sample: my.com +""" from ansible.module_utils.basic import AnsibleModule @@ -146,7 +165,8 @@ from ansible_collections.community.general.plugins.module_utils.gandi_livedns_ap def main(): module = AnsibleModule( argument_spec=dict( - api_key=dict(type='str', required=True, no_log=True), + api_key=dict(type='str', no_log=True), + personal_access_token=dict(type='str', no_log=True), record=dict(type='str', required=True), state=dict(type='str', default='present', choices=['absent', 'present']), ttl=dict(type='int'), @@ -158,6 +178,12 @@ def main(): required_if=[ ('state', 'present', ['values', 'ttl']), ], + mutually_exclusive=[ + ('api_key', 'personal_access_token'), + ], + required_one_of=[ + ('api_key', 'personal_access_token'), + ], ) gandi_api = GandiLiveDNSAPI(module) diff --git a/plugins/modules/gconftool2.py b/plugins/modules/gconftool2.py index a3ac8bb8f1..4092a8b7e6 100644 --- a/plugins/modules/gconftool2.py +++ b/plugins/modules/gconftool2.py @@ -1,62 +1,67 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2016, Kenneth D. 
Evensen # Copyright (c) 2017, Abhijeet Kasurde # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: gconftool2 author: - - Kenneth D. Evensen (@kevensen) + - Kenneth D. Evensen (@kevensen) short_description: Edit GNOME Configurations description: - - This module allows for the manipulation of GNOME 2 Configuration via - gconftool-2. Please see the gconftool-2(1) man pages for more details. + - This module allows for the manipulation of GNOME 2 Configuration using C(gconftool-2). Please see the gconftool-2(1) man + pages for more details. +seealso: + - name: C(gconftool-2) command manual page + description: Manual page for the command. + link: https://help.gnome.org/admin//system-admin-guide/2.32/gconf-6.html.en + +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: key: type: str description: - - A GConf preference key is an element in the GConf repository - that corresponds to an application preference. See man gconftool-2(1). + - A GConf preference key is an element in the GConf repository that corresponds to an application preference. required: true value: type: str description: - - Preference keys typically have simple values such as strings, - integers, or lists of strings and integers. This is ignored if the state - is "get". See man gconftool-2(1). + - Preference keys typically have simple values such as strings, integers, or lists of strings and integers. This is + ignored unless O(state=present). value_type: type: str description: - - The type of value being set. This is ignored if the state is "get". - choices: [ bool, float, int, string ] + - The type of value being set. 
This is ignored unless O(state=present). + choices: [bool, float, int, string] state: type: str description: - - The action to take upon the key/value. - - State C(get) is deprecated and will be removed in community.general 8.0.0. Please use the module M(community.general.gconftool2_info) instead. + - The action to take upon the key/value. required: true - choices: [ absent, get, present ] + choices: [absent, present] config_source: type: str description: - - Specify a configuration source to use rather than the default path. - See man gconftool-2(1). + - Specify a configuration source to use rather than the default path. direct: description: - - Access the config database directly, bypassing server. If direct is - specified then the config_source must be specified as well. - See man gconftool-2(1). + - Access the config database directly, bypassing server. If O(direct) is specified then the O(config_source) must be + specified as well. type: bool default: false -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Change the widget font to "Serif 12" community.general.gconftool2: key: "/desktop/gnome/interface/font_name" @@ -64,32 +69,45 @@ EXAMPLES = """ value: "Serif 12" """ -RETURN = ''' - key: - description: The key specified in the module parameters - returned: success - type: str - sample: /desktop/gnome/interface/font_name - value_type: - description: The type of the value that was changed - returned: success - type: str - sample: string - value: - description: The value of the preference key after executing the module - returned: success - type: str - sample: "Serif 12" -... -''' +RETURN = r""" +key: + description: The key specified in the module parameters. + returned: success + type: str + sample: /desktop/gnome/interface/font_name +value_type: + description: The type of the value that was changed. 
+ returned: success + type: str + sample: string +value: + description: + - The value of the preference key after executing the module or V(null) if key is removed. + - From community.general 7.0.0 onwards it returns V(null) for a non-existent O(key), and returned V("") before that. + returned: success + type: str + sample: "Serif 12" +previous_value: + description: + - The value of the preference key before executing the module. + - From community.general 7.0.0 onwards it returns V(null) for a non-existent O(key), and returned V("") before that. + returned: success + type: str + sample: "Serif 12" +version: + description: Version of gconftool-2. + type: str + returned: always + sample: "3.2.6" + version_added: 10.0.0 +""" from ansible_collections.community.general.plugins.module_utils.module_helper import StateModuleHelper from ansible_collections.community.general.plugins.module_utils.gconftool2 import gconftool2_runner class GConftool(StateModuleHelper): - change_params = 'value', - diff_params = 'value', + diff_params = ('value', ) output_params = ('key', 'value_type') facts_params = ('key', 'value_type') facts_name = 'gconftool2' @@ -98,13 +116,12 @@ class GConftool(StateModuleHelper): key=dict(type='str', required=True, no_log=False), value_type=dict(type='str', choices=['bool', 'float', 'int', 'string']), value=dict(type='str'), - state=dict(type='str', required=True, choices=['absent', 'get', 'present']), + state=dict(type='str', required=True, choices=['absent', 'present']), direct=dict(type='bool', default=False), config_source=dict(type='str'), ), required_if=[ ('state', 'present', ['value', 'value_type']), - ('state', 'absent', ['value']), ('direct', True, ['config_source']), ], supports_check_mode=True, @@ -112,40 +129,44 @@ class GConftool(StateModuleHelper): def __init_module__(self): self.runner = gconftool2_runner(self.module, check_rc=True) - if self.vars.state != "get": - if not self.vars.direct and self.vars.config_source is not None: - 
self.module.fail_json(msg='If the "config_source" is specified then "direct" must be "true"') + if not self.vars.direct and self.vars.config_source is not None: + self.do_raise('If the "config_source" is specified then "direct" must be "true"') + + with self.runner("version") as ctx: + rc, out, err = ctx.run() + self.vars.version = out.strip() self.vars.set('previous_value', self._get(), fact=True) self.vars.set('value_type', self.vars.value_type) + self.vars.set('_value', self.vars.previous_value, output=False, change=True) self.vars.set_meta('value', initial_value=self.vars.previous_value) self.vars.set('playbook_value', self.vars.value, fact=True) def _make_process(self, fail_on_err): def process(rc, out, err): if err and fail_on_err: - self.ansible.fail_json(msg='gconftool-2 failed with error: %s' % (str(err))) - self.vars.value = out.rstrip() + self.do_raise('gconftool-2 failed with error:\n%s' % err.strip()) + out = out.rstrip() + self.vars.value = None if out == "" else out return self.vars.value return process def _get(self): return self.runner("state key", output_process=self._make_process(False)).run(state="get") - def state_get(self): - self.deprecate( - msg="State 'get' is deprecated. 
Please use the module community.general.gconftool2_info instead", - version="8.0.0", collection_name="community.general" - ) - def state_absent(self): with self.runner("state key", output_process=self._make_process(False)) as ctx: ctx.run() + self.vars.set('run_info', ctx.run_info, verbosity=4) self.vars.set('new_value', None, fact=True) + self.vars._value = None def state_present(self): with self.runner("direct config_source value_type state key value", output_process=self._make_process(True)) as ctx: - self.vars.set('new_value', ctx.run(), fact=True) + ctx.run() + self.vars.set('run_info', ctx.run_info, verbosity=4) + self.vars.set('new_value', self._get(), fact=True) + self.vars._value = self.vars.new_value def main(): diff --git a/plugins/modules/gconftool2_info.py b/plugins/modules/gconftool2_info.py index 282065b95e..f1047bccee 100644 --- a/plugins/modules/gconftool2_info.py +++ b/plugins/modules/gconftool2_info.py @@ -1,16 +1,14 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2022, Alexei Znamensky # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: gconftool2_info author: - - "Alexei Znamensky (@russoz)" + - "Alexei Znamensky (@russoz)" short_description: Retrieve GConf configurations version_added: 5.1.0 description: @@ -21,32 +19,39 @@ extends_documentation_fragment: options: key: description: - - The key name for an element in the GConf database. + - The key name for an element in the GConf database. type: str required: true -notes: - - See man gconftool-2(1) for more details. seealso: + - name: C(gconftool-2) command manual page + description: Manual page for the command. 
+ link: https://help.gnome.org/admin//system-admin-guide/2.32/gconf-6.html.en - name: gconf repository (archived) description: Git repository for the project. It is an archived project, so the repository is read-only. link: https://gitlab.gnome.org/Archive/gconf -''' +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Get value for a certain key in the database. community.general.gconftool2_info: key: /desktop/gnome/background/picture_filename register: result """ -RETURN = ''' - value: - description: +RETURN = r""" +value: + description: - The value of the property. - returned: success - type: str - sample: Monospace 10 -''' + returned: success + type: str + sample: Monospace 10 +version: + description: Version of gconftool-2. + type: str + returned: always + sample: "3.2.6" + version_added: 10.0.0 +""" from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper from ansible_collections.community.general.plugins.module_utils.gconftool2 import gconftool2_runner @@ -63,6 +68,9 @@ class GConftoolInfo(ModuleHelper): def __init_module__(self): self.runner = gconftool2_runner(self.module, check_rc=True) + with self.runner("version") as ctx: + rc, out, err = ctx.run() + self.vars.version = out.strip() def __run__(self): with self.runner.context(args_order=["state", "key"]) as ctx: diff --git a/plugins/modules/gem.py b/plugins/modules/gem.py index a44683c786..535e420e71 100644 --- a/plugins/modules/gem.py +++ b/plugins/modules/gem.py @@ -1,20 +1,24 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2013, Johan Wiren # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: gem short_description: Manage Ruby gems description: - Manage installation and 
uninstallation of Ruby gems. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: type: str @@ -24,7 +28,7 @@ options: state: type: str description: - - The desired state of the gem. C(latest) ensures that the latest version is installed. + - The desired state of the gem. V(latest) ensures that the latest version is installed. required: false choices: [present, absent, latest] default: present @@ -42,38 +46,37 @@ options: repository: type: str description: - - The repository from which the gem will be installed + - The repository from which the gem is installed. required: false aliases: [source] user_install: description: - - Install gem in user's local gems cache or for all users + - Install gem in user's local gems cache or for all users. required: false type: bool default: true executable: type: path description: - - Override the path to the gem executable + - Override the path to the gem executable. required: false install_dir: type: path description: - - Install the gems into a specific directory. - These gems will be independent from the global installed ones. - Specifying this requires user_install to be false. + - Install the gems into a specific directory. These gems are independent from the global installed ones. Specifying + this requires user_install to be false. required: false bindir: type: path description: - - Install executables into a specific directory. + - Install executables into a specific directory. version_added: 3.3.0 norc: type: bool default: true description: - - Avoid loading any C(.gemrc) file. Ignored for RubyGems prior to 2.5.2. - - The default changed from C(false) to C(true) in community.general 6.0.0. + - Avoid loading any C(.gemrc) file. Ignored for RubyGems prior to 2.5.2. + - The default changed from V(false) to V(true) in community.general 6.0.0. 
version_added: 3.3.0 env_shebang: description: @@ -101,7 +104,7 @@ options: build_flags: type: str description: - - Allow adding build flags for gem compilation + - Allow adding build flags for gem compilation. required: false force: description: @@ -110,11 +113,11 @@ options: default: false type: bool author: - - "Ansible Core Team" - - "Johan Wiren (@johanwiren)" -''' + - "Ansible Core Team" + - "Johan Wiren (@johanwiren)" +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Install version 1.0 of vagrant community.general.gem: name: vagrant @@ -131,7 +134,7 @@ EXAMPLES = ''' name: rake gem_source: /path/to/gems/rake-1.0.gem state: present -''' +""" import re @@ -238,7 +241,7 @@ def uninstall(module): if module.params['force']: cmd.append('--force') cmd.append(module.params['name']) - module.run_command(cmd, environ_update=environ, check_rc=True) + return module.run_command(cmd, environ_update=environ, check_rc=True) def install(module): @@ -290,22 +293,22 @@ def main(): module = AnsibleModule( argument_spec=dict( - executable=dict(required=False, type='path'), - gem_source=dict(required=False, type='path'), - include_dependencies=dict(required=False, default=True, type='bool'), + executable=dict(type='path'), + gem_source=dict(type='path'), + include_dependencies=dict(default=True, type='bool'), name=dict(required=True, type='str'), - repository=dict(required=False, aliases=['source'], type='str'), - state=dict(required=False, default='present', choices=['present', 'absent', 'latest'], type='str'), - user_install=dict(required=False, default=True, type='bool'), - install_dir=dict(required=False, type='path'), + repository=dict(aliases=['source'], type='str'), + state=dict(default='present', choices=['present', 'absent', 'latest'], type='str'), + user_install=dict(default=True, type='bool'), + install_dir=dict(type='path'), bindir=dict(type='path'), norc=dict(type='bool', default=True), - pre_release=dict(required=False, default=False, type='bool'), - 
include_doc=dict(required=False, default=False, type='bool'), - env_shebang=dict(required=False, default=False, type='bool'), - version=dict(required=False, type='str'), - build_flags=dict(required=False, type='str'), - force=dict(required=False, default=False, type='bool'), + pre_release=dict(default=False, type='bool'), + include_doc=dict(default=False, type='bool'), + env_shebang=dict(default=False, type='bool'), + version=dict(type='str'), + build_flags=dict(type='str'), + force=dict(default=False, type='bool'), ), supports_check_mode=True, mutually_exclusive=[['gem_source', 'repository'], ['gem_source', 'version']], @@ -329,9 +332,21 @@ def main(): changed = True elif module.params['state'] == 'absent': if exists(module): - uninstall(module) - changed = True - + command_output = uninstall(module) + if command_output is not None and exists(module): + rc, out, err = command_output + module.fail_json( + msg=( + "Failed to uninstall gem '%s': it is still present after 'gem uninstall'. " + "This usually happens with default or system gems provided by the OS, " + "which cannot be removed with the gem command." 
+ ) % module.params['name'], + rc=rc, + stdout=out, + stderr=err + ) + else: + changed = True result = {} result['name'] = module.params['name'] result['state'] = module.params['state'] diff --git a/plugins/modules/gio_mime.py b/plugins/modules/gio_mime.py new file mode 100644 index 0000000000..a7fb3c4fcf --- /dev/null +++ b/plugins/modules/gio_mime.py @@ -0,0 +1,106 @@ +#!/usr/bin/python +# Copyright (c) 2022, Alexei Znamensky +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: gio_mime +author: + - "Alexei Znamensky (@russoz)" +short_description: Set default handler for MIME type, for applications using Gnome GIO +version_added: 7.5.0 +description: + - This module allows configuring the default handler for a specific MIME type, to be used by applications built with the + Gnome GIO API. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: full +options: + mime_type: + description: + - MIME type for which a default handler is set. + type: str + required: true + handler: + description: + - Default handler set for the MIME type. + type: str + required: true +notes: + - This module is a thin wrapper around the C(gio mime) command (and subcommand). + - See man gio(1) for more details. +seealso: + - name: C(gio) command manual page + description: Manual page for the command. + link: https://man.archlinux.org/man/gio.1 + - name: GIO Documentation + description: Reference documentation for the GIO API.. + link: https://docs.gtk.org/gio/ +""" + +EXAMPLES = r""" +- name: Set chrome as the default handler for https + community.general.gio_mime: + mime_type: x-scheme-handler/https + handler: google-chrome.desktop + register: result +""" + +RETURN = r""" +handler: + description: + - The handler set as default. 
+ returned: success + type: str + sample: google-chrome.desktop +version: + description: Version of gio. + type: str + returned: always + sample: "2.80.0" + version_added: 10.0.0 +""" + +from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper +from ansible_collections.community.general.plugins.module_utils.gio_mime import gio_mime_runner, gio_mime_get + + +class GioMime(ModuleHelper): + output_params = ['handler'] + module = dict( + argument_spec=dict( + mime_type=dict(type='str', required=True), + handler=dict(type='str', required=True), + ), + supports_check_mode=True, + ) + + def __init_module__(self): + self.runner = gio_mime_runner(self.module, check_rc=True) + with self.runner("version") as ctx: + rc, out, err = ctx.run() + self.vars.version = out.strip() + self.vars.set_meta("handler", initial_value=gio_mime_get(self.runner, self.vars.mime_type), diff=True, change=True) + + def __run__(self): + check_mode_return = (0, 'Module executed in check mode', '') + if self.vars.has_changed: + with self.runner.context(args_order="mime mime_type handler", check_mode_skip=True, check_mode_return=check_mode_return) as ctx: + rc, out, err = ctx.run() + self.vars.stdout = out + self.vars.stderr = err + self.vars.set("run_info", ctx.run_info, verbosity=4) + + +def main(): + GioMime.execute() + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/git_config.py b/plugins/modules/git_config.py index 9191de0e87..30af5b43fd 100644 --- a/plugins/modules/git_config.py +++ b/plugins/modules/git_config.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2015, Marius Gedminas # Copyright (c) 2016, Matthew Gamble @@ -7,70 +6,78 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import 
annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: git_config author: - Matthew Gamble (@djmattyg007) - Marius Gedminas (@mgedmin) requirements: ['git'] -short_description: Read and write git configuration +short_description: Update git configuration description: - - The C(git_config) module changes git configuration by invoking 'git config'. - This is needed if you don't want to use M(ansible.builtin.template) for the entire git - config file (e.g. because you need to change just C(user.email) in - /etc/.git/config). Solutions involving M(ansible.builtin.command) are cumbersome or - don't work correctly in check mode. + - The M(community.general.git_config) module changes git configuration by invoking C(git config). This is needed if you + do not want to use M(ansible.builtin.template) for the entire git config file (for example because you need to change + just C(user.email) in C(/etc/.git/config)). Solutions involving M(ansible.builtin.command) are cumbersome or do not work + correctly in check mode. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - list_all: - description: - - List all settings (optionally limited to a given I(scope)). - type: bool - default: false name: description: - - The name of the setting. If no value is supplied, the value will - be read from the config if it has been set. + - The name of the setting. type: str + required: true repo: description: - - Path to a git repository for reading and writing values from a - specific repo. + - Path to a git repository for reading and writing values from a specific repo. type: path file: description: - - Path to an adhoc git configuration file to be managed using the C(file) scope. + - Path to an adhoc git configuration file to be managed using the V(file) scope. type: path version_added: 2.0.0 scope: description: - Specify which scope to read/set values from. 
- This is required when setting config values. - - If this is set to C(local), you must also specify the C(repo) parameter. - - If this is set to C(file), you must also specify the C(file) parameter. - - It defaults to system only when not using I(list_all)=C(true). - choices: [ "file", "local", "global", "system" ] + - If this is set to V(local), you must also specify the O(repo) parameter. + - If this is set to V(file), you must also specify the O(file) parameter. + - It defaults to system. + choices: ["file", "local", "global", "system"] type: str state: description: - - "Indicates the setting should be set/unset. - This parameter has higher precedence than I(value) parameter: - when I(state)=absent and I(value) is defined, I(value) is discarded." - choices: [ 'present', 'absent' ] + - 'Indicates the setting should be set/unset. This parameter has higher precedence than O(value) parameter: when O(state=absent) + and O(value) is defined, O(value) is discarded.' + choices: ['present', 'absent'] default: 'present' type: str value: description: - - When specifying the name of a single setting, supply a value to - set that setting to the given value. + - When specifying the name of a single setting, supply a value to set that setting to the given value. + - From community.general 11.0.0 on, O(value) is required if O(state=present). To read values, use the M(community.general.git_config_info) + module instead. type: str -''' + add_mode: + description: + - Specify if a value should replace the existing value(s) or if the new value should be added alongside other values + with the same name. + - This option is only relevant when adding/replacing values. If O(state=absent) or values are just read out, this option + is not considered. 
+ choices: ["add", "replace-all"] + type: str + default: "replace-all" + version_added: 8.1.0 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Add a setting to ~/.gitconfig community.general.git_config: name: alias.ci @@ -111,55 +118,25 @@ EXAMPLES = ''' name: color.ui value: auto +- name: Add several options for the same name + community.general.git_config: + name: push.pushoption + value: "{{ item }}" + add_mode: add + loop: + - merge_request.create + - merge_request.draft + - name: Make etckeeper not complaining when it is invoked by cron community.general.git_config: name: user.email repo: /etc scope: local value: 'root@{{ ansible_fqdn }}' +""" -- name: Read individual values from git config - community.general.git_config: - name: alias.ci - scope: global - -- name: Scope system is also assumed when reading values, unless list_all=true - community.general.git_config: - name: alias.diffc - -- name: Read all values from git config - community.general.git_config: - list_all: true - scope: global - -- name: When list_all is yes and no scope is specified, you get configuration from all scopes - community.general.git_config: - list_all: true - -- name: Specify a repository to include local settings - community.general.git_config: - list_all: true - repo: /path/to/repo.git -''' - -RETURN = ''' ---- -config_value: - description: When I(list_all=false) and value is not set, a string containing the value of the setting in name - returned: success - type: str - sample: "vim" - -config_values: - description: When I(list_all=true), a dict containing key/value pairs of multiple configuration settings - returned: success - type: dict - sample: - core.editor: "vim" - color.ui: "auto" - alias.diffc: "diff --cached" - alias.remotev: "remote -v" -''' +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule @@ -167,20 +144,19 @@ from ansible.module_utils.basic import AnsibleModule def main(): module = AnsibleModule( argument_spec=dict( - list_all=dict(required=False, 
type='bool', default=False), - name=dict(type='str'), + name=dict(type='str', required=True), repo=dict(type='path'), file=dict(type='path'), - scope=dict(required=False, type='str', choices=['file', 'local', 'global', 'system']), - state=dict(required=False, type='str', default='present', choices=['present', 'absent']), - value=dict(required=False), + add_mode=dict(type='str', default='replace-all', choices=['add', 'replace-all']), + scope=dict(type='str', choices=['file', 'local', 'global', 'system']), + state=dict(type='str', default='present', choices=['present', 'absent']), + value=dict(), ), - mutually_exclusive=[['list_all', 'name'], ['list_all', 'value'], ['list_all', 'state']], required_if=[ ('scope', 'local', ['repo']), - ('scope', 'file', ['file']) + ('scope', 'file', ['file']), + ('state', 'present', ['value']), ], - required_one_of=[['list_all', 'name']], supports_check_mode=True, ) git_path = module.get_bin_path('git', True) @@ -190,94 +166,98 @@ def main(): # Set the locale to C to ensure consistent messages. module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') - if params['name']: - name = params['name'] - else: - name = None + name = params['name'] or '' + unset = params['state'] == 'absent' + new_value = params['value'] or '' + add_mode = params['add_mode'] - if params['scope']: - scope = params['scope'] - elif params['list_all']: - scope = None - else: - scope = 'system' + if not unset and not new_value: + module.fail_json(msg="If state=present, a value must be specified. 
Use the community.general.git_config_info module to read a config value.") - if params['state'] == 'absent': - unset = 'unset' - params['value'] = None - else: - unset = None + scope = determine_scope(params) + cwd = determine_cwd(scope, params) - if params['value']: - new_value = params['value'] - else: - new_value = None + base_args = [git_path, "config", "--includes"] - args = [git_path, "config", "--includes"] - if params['list_all']: - args.append('-l') if scope == 'file': - args.append('-f') - args.append(params['file']) + base_args.append('-f') + base_args.append(params['file']) elif scope: - args.append("--" + scope) - if name: - args.append(name) + base_args.append("--" + scope) - if scope == 'local': - dir = params['repo'] - elif params['list_all'] and params['repo']: - # Include local settings from a specific repo when listing all available settings - dir = params['repo'] - else: - # Run from root directory to avoid accidentally picking up any local config settings - dir = "/" + list_args = list(base_args) - (rc, out, err) = module.run_command(args, cwd=dir, expand_user_and_vars=False) - if params['list_all'] and scope and rc == 128 and 'unable to read config file' in err: - # This just means nothing has been set at the given scope - module.exit_json(changed=False, msg='', config_values={}) - elif rc >= 2: + list_args.append("--get-all") + list_args.append(name) + + (rc, out, err) = module.run_command(list_args, cwd=cwd, expand_user_and_vars=False) + + if rc >= 2: # If the return code is 1, it just means the option hasn't been set yet, which is fine. 
- module.fail_json(rc=rc, msg=err, cmd=' '.join(args)) + module.fail_json(rc=rc, msg=err, cmd=' '.join(list_args)) - if params['list_all']: - values = out.rstrip().splitlines() - config_values = {} - for value in values: - k, v = value.split('=', 1) - config_values[k] = v - module.exit_json(changed=False, msg='', config_values=config_values) - elif not new_value and not unset: - module.exit_json(changed=False, msg='', config_value=out.rstrip()) - elif unset and not out: + old_values = out.rstrip().splitlines() + + if unset and not out: module.exit_json(changed=False, msg='no setting to unset') + elif new_value in old_values and (len(old_values) == 1 or add_mode == "add") and not unset: + module.exit_json(changed=False, msg="") + + # Until this point, the git config was just read and in case no change is needed, the module has already exited. + + set_args = list(base_args) + if unset: + set_args.append("--unset-all") + set_args.append(name) else: - old_value = out.rstrip() - if old_value == new_value: - module.exit_json(changed=False, msg="") + set_args.append("--" + add_mode) + set_args.append(name) + set_args.append(new_value) if not module.check_mode: - if unset: - args.insert(len(args) - 1, "--" + unset) - cmd = args - else: - cmd = args + [new_value] - (rc, out, err) = module.run_command(cmd, cwd=dir, ignore_invalid_cwd=False, expand_user_and_vars=False) + (rc, out, err) = module.run_command(set_args, cwd=cwd, ignore_invalid_cwd=False, expand_user_and_vars=False) if err: - module.fail_json(rc=rc, msg=err, cmd=cmd) + module.fail_json(rc=rc, msg=err, cmd=set_args) + + if unset: + after_values = [] + elif add_mode == "add": + after_values = old_values + [new_value] + else: + after_values = [new_value] module.exit_json( msg='setting changed', diff=dict( - before_header=' '.join(args), - before=old_value + "\n", - after_header=' '.join(args), - after=(new_value or '') + "\n" + before_header=' '.join(set_args), + before=build_diff_value(old_values), + after_header=' 
'.join(set_args), + after=build_diff_value(after_values), ), changed=True ) +def determine_scope(params): + if params['scope']: + return params['scope'] + return 'system' + + +def build_diff_value(value): + if not value: + return "\n" + if len(value) == 1: + return value[0] + "\n" + return value + + +def determine_cwd(scope, params): + if scope == 'local': + return params['repo'] + # Run from root directory to avoid accidentally picking up any local config settings + return "/" + + if __name__ == '__main__': main() diff --git a/plugins/modules/git_config_info.py b/plugins/modules/git_config_info.py new file mode 100644 index 0000000000..b5a15fe94f --- /dev/null +++ b/plugins/modules/git_config_info.py @@ -0,0 +1,182 @@ +#!/usr/bin/python + +# Copyright (c) 2023, Guenther Grill +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + + +DOCUMENTATION = r""" +module: git_config_info +author: + - Guenther Grill (@guenhter) +version_added: 8.1.0 +requirements: ['git'] +short_description: Read git configuration +description: + - The M(community.general.git_config_info) module reads the git configuration by invoking C(git config). +extends_documentation_fragment: + - community.general.attributes + - community.general.attributes.info_module +options: + name: + description: + - The name of the setting to read. + - If not provided, all settings are returned as RV(config_values). + type: str + path: + description: + - Path to a git repository or file for reading values from a specific repo. + - If O(scope) is V(local), this must point to a repository to read from. + - If O(scope) is V(file), this must point to specific git config file to read from. + - Otherwise O(path) is ignored if set. + type: path + scope: + description: + - Specify which scope to read values from. + - If set to V(global), the global git config is used. 
O(path) is ignored. + - If set to V(system), the system git config is used. O(path) is ignored. + - If set to V(local), O(path) must be set to the repo to read from. + - If set to V(file), O(path) must be set to the config file to read from. + choices: ["global", "system", "local", "file"] + default: "system" + type: str +""" + +EXAMPLES = r""" +- name: Read a system wide config + community.general.git_config_info: + name: core.editor + register: result + +- name: Show value of core.editor + ansible.builtin.debug: + msg: "{{ result.config_value | default('(not set)', true) }}" + +- name: Read a global config from ~/.gitconfig + community.general.git_config_info: + name: alias.remotev + scope: global + +- name: Read a project specific config + community.general.git_config_info: + name: color.ui + scope: local + path: /etc + +- name: Read all global values + community.general.git_config_info: + scope: global + +- name: Read all system wide values + community.general.git_config_info: + +- name: Read all values of a specific file + community.general.git_config_info: + scope: file + path: /etc/gitconfig +""" + +RETURN = r""" +config_value: + description: >- + When O(name) is set, a string containing the value of the setting in name. If O(name) is not set, empty. If a config key + such as V(push.pushoption) has more then one entry, just the first one is returned here. + returned: success if O(name) is set + type: str + sample: "vim" + +config_values: + description: + - This is a dictionary mapping a git configuration setting to a list of its values. + - When O(name) is not set, all configuration settings are returned here. + - When O(name) is set, only the setting specified in O(name) is returned here. If that setting is not set, the key is + still present, and its value is an empty list. 
+ returned: success + type: dict + sample: + core.editor: ["vim"] + color.ui: ["auto"] + push.pushoption: ["merge_request.create", "merge_request.draft"] + alias.remotev: ["remote -v"] +""" + +from ansible.module_utils.basic import AnsibleModule + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(type="str"), + path=dict(type="path"), + scope=dict(type="str", default="system", choices=["global", "system", "local", "file"]), + ), + required_if=[ + ("scope", "local", ["path"]), + ("scope", "file", ["path"]), + ], + required_one_of=[], + supports_check_mode=True, + ) + + # We check error message for a pattern, so we need to make sure the messages appear in the form we're expecting. + # Set the locale to C to ensure consistent messages. + module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C') + + name = module.params["name"] + path = module.params["path"] + scope = module.params["scope"] + + run_cwd = path if scope == "local" else "/" + args = build_args(module, name, path, scope) + + (rc, out, err) = module.run_command(args, cwd=run_cwd, expand_user_and_vars=False) + + if rc == 128 and "unable to read config file" in err: + # This just means nothing has been set at the given scope + pass + elif rc >= 2: + # If the return code is 1, it just means the option hasn't been set yet, which is fine. 
+ module.fail_json(rc=rc, msg=err, cmd=" ".join(args)) + + output_lines = out.strip("\0").split("\0") if out else [] + + if name: + first_value = output_lines[0] if output_lines else "" + config_values = {name: output_lines} + module.exit_json(changed=False, msg="", config_value=first_value, config_values=config_values) + else: + config_values = text_to_dict(output_lines) + module.exit_json(changed=False, msg="", config_value="", config_values=config_values) + + +def build_args(module, name, path, scope): + git_path = module.get_bin_path("git", True) + args = [git_path, "config", "--includes", "--null", "--" + scope] + + if scope == "file": + args.append(path) + + if name: + args.extend(["--get-all", name]) + else: + args.append("--list") + + return args + + +def text_to_dict(text_lines): + config_values = {} + for value in text_lines: + k, v = value.split("\n", 1) + if k in config_values: + config_values[k].append(v) + else: + config_values[k] = [v] + return config_values + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/github_deploy_key.py b/plugins/modules/github_deploy_key.py index bd32438b0a..799ee300c5 100644 --- a/plugins/modules/github_deploy_key.py +++ b/plugins/modules/github_deploy_key.py @@ -1,27 +1,31 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) Ansible project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: github_deploy_key author: "Ali (@bincyber)" short_description: Manages deploy keys for GitHub repositories description: - - "Adds or removes deploy keys for GitHub repositories. 
Supports authentication using username and password, - username and password and 2-factor authentication code (OTP), OAuth2 token, or personal access token. Admin - rights on the repository are required." + - Adds or removes deploy keys for GitHub repositories. Supports authentication using username and password, username and + password and 2-factor authentication code (OTP), OAuth2 token, or personal access token. Admin rights on the repository + are required. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: github_url: description: - - The base URL of the GitHub API + - The base URL of the GitHub API. required: false type: str version_added: '0.2.0' @@ -30,19 +34,19 @@ options: description: - The name of the individual account or organization that owns the GitHub repository. required: true - aliases: [ 'account', 'organization' ] + aliases: ['account', 'organization'] type: str repo: description: - The name of the GitHub repository. required: true - aliases: [ 'repository' ] + aliases: ['repository'] type: str name: description: - The name for the deploy key. required: true - aliases: [ 'title', 'label' ] + aliases: ['title', 'label'] type: str key: description: @@ -51,41 +55,43 @@ options: type: str read_only: description: - - If C(true), the deploy key will only be able to read repository contents. Otherwise, the deploy key will be able to read and write. + - If V(true), the deploy key is only able to read repository contents. Otherwise, the deploy key is able to read and + write. type: bool default: true state: description: - The state of the deploy key. default: "present" - choices: [ "present", "absent" ] + choices: ["present", "absent"] type: str force: description: - - If C(true), forcefully adds the deploy key by deleting any existing deploy key with the same public key or title. 
+ - If V(true), forcefully adds the deploy key by deleting any existing deploy key with the same public key or title. type: bool default: false username: description: - - The username to authenticate with. Should not be set when using personal access token + - The username to authenticate with. Should not be set when using personal access token. type: str password: description: - - The password to authenticate with. Alternatively, a personal access token can be used instead of I(username) and I(password) combination. + - The password to authenticate with. Alternatively, a personal access token can be used instead of O(username) and O(password) + combination. type: str token: description: - - The OAuth2 token or personal access token to authenticate with. Mutually exclusive with I(password). + - The OAuth2 token or personal access token to authenticate with. Mutually exclusive with O(password). type: str otp: description: - - The 6 digit One Time Password for 2-Factor Authentication. Required together with I(username) and I(password). + - The 6 digit One Time Password for 2-Factor Authentication. Required together with O(username) and O(password). type: int notes: - - "Refer to GitHub's API documentation here: https://developer.github.com/v3/repos/keys/." -''' + - "Refer to GitHub's API documentation here: https://developer.github.com/v3/repos/keys/." +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Add a new read-only deploy key to a GitHub repository using basic authentication community.general.github_deploy_key: owner: "johndoe" @@ -145,33 +151,33 @@ EXAMPLES = ''' read_only: true username: "janedoe" password: "supersecretpassword" -''' +""" -RETURN = ''' +RETURN = r""" msg: - description: the status message describing what occurred - returned: always - type: str - sample: "Deploy key added successfully" + description: The status message describing what occurred. 
+ returned: always + type: str + sample: "Deploy key added successfully" http_status_code: - description: the HTTP status code returned by the GitHub API - returned: failed - type: int - sample: 400 + description: The HTTP status code returned by the GitHub API. + returned: failed + type: int + sample: 400 error: - description: the error message returned by the GitHub API - returned: failed - type: str - sample: "key is already in use" + description: The error message returned by the GitHub API. + returned: failed + type: str + sample: "key is already in use" id: - description: the key identifier assigned by GitHub for the deploy key - returned: changed - type: int - sample: 24381901 -''' + description: The key identifier assigned by GitHub for the deploy key. + returned: changed + type: int + sample: 24381901 +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url @@ -220,7 +226,7 @@ class GithubDeployKey(object): yield self.module.from_json(resp.read()) links = {} - for x, y in findall(r'<([^>]+)>;\s*rel="(\w+)"', info["link"]): + for x, y in findall(r'<([^>]+)>;\s*rel="(\w+)"', info.get("link", '')): links[y] = x url = links.get('next') @@ -251,7 +257,12 @@ class GithubDeployKey(object): key_id = response_body["id"] self.module.exit_json(changed=True, msg="Deploy key successfully added", id=key_id) elif status_code == 422: - self.module.exit_json(changed=False, msg="Deploy key already exists") + # there might be multiple reasons for a 422 + # so we must check if the reason is that the key already exists + if self.get_existing_key(): + self.module.exit_json(changed=False, msg="Deploy key already exists") + else: + self.handle_error(method="POST", info=info) else: self.handle_error(method="POST", info=info) @@ -271,6 +282,8 @@ class GithubDeployKey(object): body = info.get('body') if body: err = self.module.from_json(body)['message'] + else: + err = None if status_code == 401: self.module.fail_json(msg="Failed to 
connect to {0} due to invalid credentials".format(self.github_url), http_status_code=status_code, error=err) @@ -288,18 +301,18 @@ class GithubDeployKey(object): def main(): module = AnsibleModule( argument_spec=dict( - github_url=dict(required=False, type='str', default="https://api.github.com"), + github_url=dict(type='str', default="https://api.github.com"), owner=dict(required=True, type='str', aliases=['account', 'organization']), repo=dict(required=True, type='str', aliases=['repository']), name=dict(required=True, type='str', aliases=['title', 'label']), key=dict(required=True, type='str', no_log=False), - read_only=dict(required=False, type='bool', default=True), + read_only=dict(type='bool', default=True), state=dict(default='present', choices=['present', 'absent']), - force=dict(required=False, type='bool', default=False), - username=dict(required=False, type='str'), - password=dict(required=False, type='str', no_log=True), - otp=dict(required=False, type='int', no_log=True), - token=dict(required=False, type='str', no_log=True) + force=dict(type='bool', default=False), + username=dict(type='str'), + password=dict(type='str', no_log=True), + otp=dict(type='int', no_log=True), + token=dict(type='str', no_log=True) ), mutually_exclusive=[ ['password', 'token'] diff --git a/plugins/modules/github_issue.py b/plugins/modules/github_issue.py index d49837499a..2923917eec 100644 --- a/plugins/modules/github_issue.py +++ b/plugins/modules/github_issue.py @@ -1,20 +1,25 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017-18, Abhijeet Kasurde # # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: github_issue short_description: View GitHub issue description: - - View GitHub 
issue for a given repository and organization. + - View GitHub issue for a given repository and organization. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: repo: description: @@ -33,24 +38,24 @@ options: type: int action: description: - - Get various details about issue depending upon action specified. + - Get various details about issue depending upon action specified. default: 'get_status' choices: - - 'get_status' + - get_status type: str author: - - Abhijeet Kasurde (@Akasurde) -''' + - Abhijeet Kasurde (@Akasurde) +""" -RETURN = ''' +RETURN = r""" issue_status: - description: State of the GitHub issue - type: str - returned: success - sample: open, closed -''' + description: State of the GitHub issue. + type: str + returned: success + sample: open, closed +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Check if GitHub issue is closed or not community.general.github_issue: organization: ansible @@ -63,7 +68,7 @@ EXAMPLES = ''' ansible.builtin.debug: msg: Do something when issue 23642 is open when: r.issue_status == 'open' -''' +""" import json diff --git a/plugins/modules/github_key.py b/plugins/modules/github_key.py index 3c7ee7bd7b..957d130774 100644 --- a/plugins/modules/github_key.py +++ b/plugins/modules/github_key.py @@ -1,19 +1,25 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: github_key short_description: Manage GitHub access keys description: - - Creates, removes, or updates GitHub access keys. + - Creates, removes, or updates GitHub access keys. 
+ - Works with both GitHub.com and GitHub Enterprise Server installations. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: token: description: @@ -22,12 +28,12 @@ options: type: str name: description: - - SSH key name + - SSH key name. required: true type: str pubkey: description: - - SSH public key value. Required when I(state=present). + - SSH public key value. Required when O(state=present). type: str state: description: @@ -37,34 +43,64 @@ options: type: str force: description: - - The default is C(true), which will replace the existing remote key - if it's different than C(pubkey). If C(false), the key will only be - set if no key with the given I(name) exists. + - The default is V(true), which replaces the existing remote key if it is different than O(pubkey). If V(false), the + key is only set if no key with the given O(name) exists. type: bool default: true + api_url: + description: + - URL to the GitHub API if not using github.com but your own GitHub Enterprise instance. + type: str + default: 'https://api.github.com' + version_added: "11.0.0" author: Robert Estelle (@erydo) -''' +""" -RETURN = ''' +RETURN = r""" deleted_keys: - description: An array of key objects that were deleted. Only present on state=absent - type: list - returned: When state=absent - sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': false}] + description: An array of key objects that were deleted. Only present on state=absent. + type: list + returned: When state=absent + sample: + [ + { + "id": 0, + "key": "BASE64 encoded key", + "url": "http://example.com/github key", + "created_at": "YYYY-MM-DDTHH:MM:SZ", + "read_only": false + } + ] matching_keys: - description: An array of keys matching the specified name. 
Only present on state=present - type: list - returned: When state=present - sample: [{'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': false}] + description: An array of keys matching the specified name. Only present on state=present. + type: list + returned: When state=present + sample: + [ + { + "id": 0, + "key": "BASE64 encoded key", + "url": "http://example.com/github key", + "created_at": "YYYY-MM-DDTHH:MM:SZ", + "read_only": false + } + ] key: - description: Metadata about the key just created. Only present on state=present - type: dict - returned: success - sample: {'id': 0, 'key': 'BASE64 encoded key', 'url': 'http://example.com/github key', 'created_at': 'YYYY-MM-DDTHH:MM:SZ', 'read_only': false} -''' + description: Metadata about the key just created. Only present on state=present. + type: dict + returned: success + sample: + { + "id": 0, + "key": "BASE64 encoded key", + "url": "http://example.com/github key", + "created_at": "YYYY-MM-DDTHH:MM:SZ", + "read_only": false + } +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Read SSH public key to authorize ansible.builtin.shell: cat /home/foo/.ssh/id_rsa.pub register: ssh_pub_key @@ -75,17 +111,33 @@ EXAMPLES = ''' name: Access Key for Some Machine token: '{{ github_access_token }}' pubkey: '{{ ssh_pub_key.stdout }}' -''' +# Alternatively, a single task can be used reading a key from a file on the controller +- name: Authorize key with GitHub + community.general.github_key: + name: Access Key for Some Machine + token: '{{ github_access_token }}' + pubkey: "{{ lookup('ansible.builtin.file', '/home/foo/.ssh/id_rsa.pub') }}" +# GitHub Enterprise Server usage +- name: Authorize key with GitHub Enterprise + community.general.github_key: + name: Access Key for Some Machine + token: '{{ github_enterprise_token }}' + pubkey: "{{ lookup('ansible.builtin.file', '/home/foo/.ssh/id_rsa.pub') }}" + api_url: 'https://github.company.com/api/v3' 
+""" + +import datetime import json import re from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import fetch_url - -API_BASE = 'https://api.github.com' +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) class GitHubResponse(object): @@ -107,9 +159,10 @@ class GitHubResponse(object): class GitHubSession(object): - def __init__(self, module, token): + def __init__(self, module, token, api_url): self.module = module self.token = token + self.api_url = api_url.rstrip('/') def request(self, method, url, data=None): headers = { @@ -127,7 +180,7 @@ class GitHubSession(object): def get_all_keys(session): - url = API_BASE + '/user/keys' + url = session.api_url + '/user/keys' result = [] while url: r = session.request('GET', url) @@ -138,21 +191,20 @@ def get_all_keys(session): def create_key(session, name, pubkey, check_mode): if check_mode: - from datetime import datetime - now = datetime.utcnow() + now_t = now() return { 'id': 0, 'key': pubkey, 'title': name, 'url': 'http://example.com/CHECK_MODE_GITHUB_KEY', - 'created_at': datetime.strftime(now, '%Y-%m-%dT%H:%M:%SZ'), + 'created_at': datetime.datetime.strftime(now_t, '%Y-%m-%dT%H:%M:%SZ'), 'read_only': False, 'verified': False } else: return session.request( 'POST', - API_BASE + '/user/keys', + session.api_url + '/user/keys', data=json.dumps({'title': name, 'key': pubkey})).json() @@ -161,7 +213,7 @@ def delete_keys(session, to_delete, check_mode): return for key in to_delete: - session.request('DELETE', API_BASE + '/user/keys/%s' % key["id"]) + session.request('DELETE', session.api_url + '/user/keys/%s' % key["id"]) def ensure_key_absent(session, name, check_mode): @@ -209,6 +261,7 @@ def main(): 'pubkey': {}, 'state': {'choices': ['present', 'absent'], 'default': 'present'}, 'force': {'default': True, 'type': 'bool'}, + 'api_url': {'default': 'https://api.github.com', 'type': 'str'}, } module = AnsibleModule( argument_spec=argument_spec, @@ 
-220,6 +273,7 @@ def main(): state = module.params['state'] force = module.params['force'] pubkey = module.params.get('pubkey') + api_url = module.params.get('api_url') if pubkey: pubkey_parts = pubkey.split(' ') @@ -229,7 +283,7 @@ def main(): elif state == 'present': module.fail_json(msg='"pubkey" is required when state=present') - session = GitHubSession(module, token) + session = GitHubSession(module, token, api_url) if state == 'present': result = ensure_key_present(module, session, name, pubkey, force=force, check_mode=module.check_mode) diff --git a/plugins/modules/github_release.py b/plugins/modules/github_release.py index 0b3a5a886c..933b9c8bd1 100644 --- a/plugins/modules/github_release.py +++ b/plugins/modules/github_release.py @@ -1,79 +1,83 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright Ansible Team # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: github_release short_description: Interact with GitHub Releases description: - - Fetch metadata about GitHub Releases + - Fetch metadata about GitHub Releases. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - token: - description: - - GitHub Personal Access Token for authenticating. Mutually exclusive with C(password). - type: str - user: - description: - - The GitHub account that owns the repository - type: str - required: true - password: - description: - - The GitHub account password for the user. Mutually exclusive with C(token). 
- type: str - repo: - description: - - Repository name - type: str - required: true - action: - description: - - Action to perform - type: str - required: true - choices: [ 'latest_release', 'create_release' ] - tag: - description: - - Tag name when creating a release. Required when using action is set to C(create_release). - type: str - target: - description: - - Target of release when creating a release - type: str - name: - description: - - Name of release when creating a release - type: str - body: - description: - - Description of the release when creating a release - type: str - draft: - description: - - Sets if the release is a draft or not. (boolean) - type: bool - default: false - prerelease: - description: - - Sets if the release is a prerelease or not. (boolean) - type: bool - default: false + token: + description: + - GitHub Personal Access Token for authenticating. Mutually exclusive with O(password). + type: str + user: + description: + - The GitHub account that owns the repository. + type: str + required: true + password: + description: + - The GitHub account password for the user. Mutually exclusive with O(token). + type: str + repo: + description: + - Repository name. + type: str + required: true + action: + description: + - Action to perform. + type: str + required: true + choices: ['latest_release', 'create_release'] + tag: + description: + - Tag name when creating a release. Required when using O(action=create_release). + type: str + target: + description: + - Target of release when creating a release. + type: str + name: + description: + - Name of release when creating a release. + type: str + body: + description: + - Description of the release when creating a release. + type: str + draft: + description: + - Sets if the release is a draft or not. (boolean). + type: bool + default: false + prerelease: + description: + - Sets if the release is a prerelease or not. (boolean). 
+ type: bool + default: false author: - - "Adrian Moisey (@adrianmoisey)" + - "Adrian Moisey (@adrianmoisey)" requirements: - - "github3.py >= 1.0.0a3" -''' + - "github3.py >= 1.0.0a3" +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Get latest release of a public repository community.general.github_release: user: ansible @@ -87,7 +91,7 @@ EXAMPLES = ''' repo: testrepo action: latest_release -- name: Get latest release of test repo using username and password. Ansible 2.4. +- name: Get latest release of test repo using username and password community.general.github_release: user: testuser password: secret123 @@ -104,16 +108,15 @@ EXAMPLES = ''' target: master name: My Release body: Some description +""" -''' - -RETURN = ''' +RETURN = r""" tag: - description: Version of the created/latest release. - type: str - returned: success - sample: 1.1.0 -''' + description: Version of the created/latest release. + type: str + returned: success + sample: 1.1.0 +""" import traceback @@ -177,13 +180,29 @@ def main(): else: gh_obj = github3.GitHub() - # test if we're actually logged in - if password or login_token: + # GitHub's token formats: + # - ghp_ - Personal access token (classic) + # - github_pat_ - Fine-grained personal access token + # - gho_ - OAuth access token + # - ghu_ - User access token for a GitHub App + # - ghs_ - Installation access token for a GitHub App + # - ghr_ - Refresh token for a GitHub App + # + # References: + # https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/about-authentication-to-github#githubs-token-formats + # + # Test if we're actually logged in, but skip this check for some token prefixes + SKIPPED_TOKEN_PREFIXES = ['ghs_'] + if password or (login_token and not any(login_token.startswith(prefix) for prefix in SKIPPED_TOKEN_PREFIXES)): gh_obj.me() except github3.exceptions.AuthenticationFailed as e: module.fail_json(msg='Failed to connect to GitHub: %s' % to_native(e), details="Please check username and password or 
token " "for repository %s" % repo) + except github3.exceptions.GitHubError as e: + module.fail_json(msg='GitHub API error: %s' % to_native(e), + details="Please check username and password or token " + "for repository %s" % repo) repository = gh_obj.repository(user, repo) diff --git a/plugins/modules/github_repo.py b/plugins/modules/github_repo.py index d01312fcfa..601bea71fd 100644 --- a/plugins/modules/github_repo.py +++ b/plugins/modules/github_repo.py @@ -1,100 +1,100 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2021, Álvaro Torres Cogollo # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: github_repo short_description: Manage your repositories on Github version_added: 2.2.0 description: -- Manages Github repositories using PyGithub library. -- Authentication can be done with I(access_token) or with I(username) and I(password). + - Manages Github repositories using PyGithub library. + - Authentication can be done with O(access_token) or with O(username) and O(password). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: username: description: - - Username used for authentication. - - This is only needed when not using I(access_token). + - Username used for authentication. + - This is only needed when not using O(access_token). type: str required: false password: description: - - Password used for authentication. - - This is only needed when not using I(access_token). + - Password used for authentication. + - This is only needed when not using O(access_token). type: str required: false access_token: description: - - Token parameter for authentication. 
- - This is only needed when not using I(username) and I(password). + - Token parameter for authentication. + - This is only needed when not using O(username) and O(password). type: str required: false name: description: - - Repository name. + - Repository name. type: str required: true description: description: - - Description for the repository. - - Defaults to empty if I(force_defaults=true), which is the default in this module. - - Defaults to empty if I(force_defaults=false) when creating a new repository. - - This is only used when I(state) is C(present). + - Description for the repository. + - Defaults to empty if O(force_defaults=true), which is the default in this module. + - Defaults to empty if O(force_defaults=false) when creating a new repository. + - This is only used when O(state) is V(present). type: str required: false private: description: - - Whether the repository should be private or not. - - Defaults to C(false) if I(force_defaults=true), which is the default in this module. - - Defaults to C(false) if I(force_defaults=false) when creating a new repository. - - This is only used when I(state) is C(present). + - Whether the repository should be private or not. + - Defaults to V(false) if O(force_defaults=true), which is the default in this module. + - Defaults to V(false) if O(force_defaults=false) when creating a new repository. + - This is only used when O(state=present). type: bool required: false state: description: - - Whether the repository should exist or not. + - Whether the repository should exist or not. type: str default: present - choices: [ absent, present ] + choices: [absent, present] required: false organization: description: - - Organization for the repository. - - When I(state) is C(present), the repository will be created in the current user profile. + - Organization for the repository. + - When O(state=present), the repository is created in the current user profile. 
type: str required: false api_url: description: - - URL to the GitHub API if not using github.com but you own instance. + - URL to the GitHub API if not using github.com but you own instance. type: str default: 'https://api.github.com' version_added: "3.5.0" force_defaults: description: - - Overwrite current I(description) and I(private) attributes with defaults if set to C(true), which currently is the default. - - The default for this option will be deprecated in a future version of this collection, and eventually change to C(false). + - If V(true), overwrite current O(description) and O(private) attributes with defaults. + - V(true) is deprecated for this option and will not be allowed starting in community.general 13.0.0. V(false) will be the default value then. type: bool - default: true required: false version_added: 4.1.0 requirements: -- PyGithub>=1.54 + - PyGithub>=1.54 notes: -- For Python 3, PyGithub>=1.54 should be used. -- "For Python 3.5, PyGithub==1.54 should be used. More information: U(https://pygithub.readthedocs.io/en/latest/changes.html#version-1-54-november-30-2020)." -- "For Python 2.7, PyGithub==1.45 should be used. More information: U(https://pygithub.readthedocs.io/en/latest/changes.html#version-1-45-december-29-2019)." -- Supports C(check_mode). + - For Python 3, PyGithub>=1.54 should be used. author: -- Álvaro Torres Cogollo (@atorrescogollo) -''' + - Álvaro Torres Cogollo (@atorrescogollo) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a Github repository community.general.github_repo: access_token: mytoken @@ -114,14 +114,14 @@ EXAMPLES = ''' name: myrepo state: absent register: result -''' +""" -RETURN = ''' +RETURN = r""" repo: description: Repository information as JSON. See U(https://docs.github.com/en/rest/reference/repos#get-a-repository). 
- returned: success and I(state) is C(present) + returned: success and O(state=present) type: dict -''' +""" import traceback from ansible.module_utils.basic import AnsibleModule, missing_required_lib @@ -240,13 +240,13 @@ def main(): password=dict(type='str', no_log=True), access_token=dict(type='str', no_log=True), name=dict(type='str', required=True), - state=dict(type='str', required=False, default="present", + state=dict(type='str', default="present", choices=["present", "absent"]), - organization=dict(type='str', required=False, default=None), + organization=dict(type='str', ), private=dict(type='bool'), description=dict(type='str'), - api_url=dict(type='str', required=False, default='https://api.github.com'), - force_defaults=dict(type='bool', default=True), + api_url=dict(type='str', default='https://api.github.com'), + force_defaults=dict(type='bool'), ) module = AnsibleModule( argument_spec=module_args, @@ -256,6 +256,11 @@ def main(): mutually_exclusive=[('username', 'access_token')] ) + if module.params['force_defaults'] is None: + module.deprecate("'force_defaults=true' is deprecated and will not be allowed in community.general 13.0.0, use 'force_defaults=false' instead", + version="13.0.0", collection_name="community.general") + module.params['force_defaults'] = True + if not HAS_GITHUB_PACKAGE: module.fail_json(msg=missing_required_lib( "PyGithub"), exception=GITHUB_IMP_ERR) diff --git a/plugins/modules/github_webhook.py b/plugins/modules/github_webhook.py index b97087d221..867bfc380e 100644 --- a/plugins/modules/github_webhook.py +++ b/plugins/modules/github_webhook.py @@ -1,40 +1,44 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (c) 2018, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations 
-DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: github_webhook short_description: Manage GitHub webhooks description: - - "Create and delete GitHub webhooks" + - Create and delete GitHub webhooks. requirements: - "PyGithub >= 1.3.5" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: repository: description: - - Full name of the repository to configure a hook for + - Full name of the repository to configure a hook for. type: str required: true aliases: - repo url: description: - - URL to which payloads will be delivered + - URL to which payloads are delivered. type: str required: true content_type: description: - - The media type used to serialize the payloads + - The media type used to serialize the payloads. type: str required: false - choices: [ form, json ] + choices: [form, json] default: form secret: description: @@ -43,61 +47,57 @@ options: required: false insecure_ssl: description: - - > - Flag to indicate that GitHub should skip SSL verification when calling - the hook. + - Flag to indicate that GitHub should skip SSL verification when calling the hook. required: false type: bool default: false events: description: - - > - A list of GitHub events the hook is triggered for. Events are listed at - U(https://developer.github.com/v3/activity/events/types/). Required - unless C(state) is C(absent) + - A list of GitHub events the hook is triggered for. Events are listed at U(https://developer.github.com/v3/activity/events/types/). + Required unless O(state=absent). required: false type: list elements: str active: description: - - Whether or not the hook is active + - Whether or not the hook is active. required: false type: bool default: true state: description: - - Whether the hook should be present or absent + - Whether the hook should be present or absent. 
type: str required: false - choices: [ absent, present ] + choices: [absent, present] default: present user: description: - - User to authenticate to GitHub as + - User to authenticate to GitHub as. type: str required: true password: description: - - Password to authenticate to GitHub with + - Password to authenticate to GitHub with. type: str required: false token: description: - - Token to authenticate to GitHub with + - Token to authenticate to GitHub with. type: str required: false github_url: description: - - Base URL of the GitHub API + - Base URL of the GitHub API. type: str required: false default: https://api.github.com author: - "Chris St. Pierre (@stpierre)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a new webhook that triggers on push (password auth) community.general.github_webhook: repository: ansible/ansible @@ -128,16 +128,15 @@ EXAMPLES = ''' state: absent user: "{{ github_user }}" password: "{{ github_password }}" -''' +""" -RETURN = ''' ---- +RETURN = r""" hook_id: - description: The GitHub ID of the hook created/updated + description: The GitHub ID of the hook created/updated. 
returned: when state is 'present' type: int sample: 6206 -''' +""" import traceback @@ -154,13 +153,18 @@ from ansible.module_utils.common.text.converters import to_native def _create_hook_config(module): - return { + hook_config = { "url": module.params["url"], "content_type": module.params["content_type"], - "secret": module.params.get("secret"), "insecure_ssl": "1" if module.params["insecure_ssl"] else "0" } + secret = module.params.get("secret") + if secret: + hook_config["secret"] = secret + + return hook_config + def create_hook(repo, module): config = _create_hook_config(module) @@ -202,25 +206,16 @@ def main(): argument_spec=dict( repository=dict(type='str', required=True, aliases=['repo']), url=dict(type='str', required=True), - content_type=dict( - type='str', - choices=('json', 'form'), - required=False, - default='form'), - secret=dict(type='str', required=False, no_log=True), - insecure_ssl=dict(type='bool', required=False, default=False), - events=dict(type='list', elements='str', required=False), - active=dict(type='bool', required=False, default=True), - state=dict( - type='str', - required=False, - choices=('absent', 'present'), - default='present'), + content_type=dict(type='str', choices=('json', 'form'), default='form'), + secret=dict(type='str', no_log=True), + insecure_ssl=dict(type='bool', default=False), + events=dict(type='list', elements='str', ), + active=dict(type='bool', default=True), + state=dict(type='str', choices=('absent', 'present'), default='present'), user=dict(type='str', required=True), - password=dict(type='str', required=False, no_log=True), - token=dict(type='str', required=False, no_log=True), - github_url=dict( - type='str', required=False, default="https://api.github.com")), + password=dict(type='str', no_log=True), + token=dict(type='str', no_log=True), + github_url=dict(type='str', default="https://api.github.com")), mutually_exclusive=(('password', 'token'),), required_one_of=(("password", "token"),), 
required_if=(("state", "present", ("events",)),), diff --git a/plugins/modules/github_webhook_info.py b/plugins/modules/github_webhook_info.py index a6f7c3e52c..30b3e719f3 100644 --- a/plugins/modules/github_webhook_info.py +++ b/plugins/modules/github_webhook_info.py @@ -1,20 +1,16 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (c) 2018, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: github_webhook_info short_description: Query information about GitHub webhooks description: - - "Query information about GitHub webhooks" - - This module was called C(github_webhook_facts) before Ansible 2.9. The usage did not change. + - Query information about GitHub webhooks. requirements: - "PyGithub >= 1.3.5" extends_documentation_fragment: @@ -23,38 +19,38 @@ extends_documentation_fragment: options: repository: description: - - Full name of the repository to configure a hook for + - Full name of the repository to configure a hook for. type: str required: true aliases: - repo user: description: - - User to authenticate to GitHub as + - User to authenticate to GitHub as. type: str required: true password: description: - - Password to authenticate to GitHub with + - Password to authenticate to GitHub with. type: str required: false token: description: - - Token to authenticate to GitHub with + - Token to authenticate to GitHub with. type: str required: false github_url: description: - - Base URL of the github api + - Base URL of the GitHub API. type: str required: false default: https://api.github.com author: - "Chris St. 
Pierre (@stpierre)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: List hooks for a repository (password auth) community.general.github_webhook_info: repository: ansible/ansible @@ -69,27 +65,27 @@ EXAMPLES = ''' token: "{{ github_user_api_token }}" github_url: https://github.example.com/api/v3/ register: myrepo_webhooks -''' +""" -RETURN = ''' ---- +RETURN = r""" hooks: - description: A list of hooks that exist for the repo + description: A list of hooks that exist for the repo. returned: always type: list elements: dict sample: - - { - "has_shared_secret": true, - "url": "https://jenkins.example.com/ghprbhook/", - "events": ["issue_comment", "pull_request"], - "insecure_ssl": "1", - "content_type": "json", - "active": true, - "id": 6206, - "last_response": {"status": "active", "message": "OK", "code": 200} - } -''' + - has_shared_secret: true + url: https://jenkins.example.com/ghprbhook/ + events: [issue_comment, pull_request] + insecure_ssl: "1" + content_type: json + active: true + id: 6206 + last_response: + status: active + message: OK + code: 200 +""" import traceback @@ -126,10 +122,10 @@ def main(): argument_spec=dict( repository=dict(type='str', required=True, aliases=["repo"]), user=dict(type='str', required=True), - password=dict(type='str', required=False, no_log=True), - token=dict(type='str', required=False, no_log=True), + password=dict(type='str', no_log=True), + token=dict(type='str', no_log=True), github_url=dict( - type='str', required=False, default="https://api.github.com")), + type='str', default="https://api.github.com")), mutually_exclusive=(('password', 'token'), ), required_one_of=(("password", "token"), ), supports_check_mode=True) diff --git a/plugins/modules/gitlab_branch.py b/plugins/modules/gitlab_branch.py index e57ca4922f..514300a924 100644 --- a/plugins/modules/gitlab_branch.py +++ b/plugins/modules/gitlab_branch.py @@ -1,13 +1,11 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2021, Werner Dijkerman 
(ikben@werner-dijkerman.nl) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: gitlab_branch short_description: Create or delete a branch version_added: 4.2.0 @@ -16,11 +14,17 @@ description: author: - paytroff (@paytroff) requirements: - - python >= 2.7 - python-gitlab >= 2.3.0 extends_documentation_fragment: - community.general.auth_basic - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: none + diff_mode: + support: none options: state: @@ -42,12 +46,12 @@ options: ref_branch: description: - Reference branch to create from. - - This must be specified if I(state=present). + - This must be specified if O(state=present). type: str -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create branch branch2 from main community.general.gitlab_branch: api_url: https://gitlab.com @@ -64,11 +68,10 @@ EXAMPLES = ''' project: "group1/project1" branch: branch2 state: absent +""" -''' - -RETURN = ''' -''' +RETURN = r""" +""" import traceback @@ -77,7 +80,7 @@ from ansible.module_utils.api import basic_auth_argument_spec from ansible_collections.community.general.plugins.module_utils.version import LooseVersion from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, gitlab_authentication, gitlab, ensure_gitlab_package + auth_argument_spec, gitlab_authentication, gitlab ) @@ -113,7 +116,7 @@ def main(): argument_spec.update( project=dict(type='str', required=True), branch=dict(type='str', required=True), - ref_branch=dict(type='str', required=False), + ref_branch=dict(type='str'), state=dict(type='str', default="present", choices=["absent", "present"]), ) @@ -137,7 +140,9 @@ def main(): ], 
supports_check_mode=False ) - ensure_gitlab_package(module) + + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) project = module.params['project'] branch = module.params['branch'] @@ -149,7 +154,6 @@ def main(): module.fail_json(msg="community.general.gitlab_proteched_branch requires python-gitlab Python module >= 2.3.0 (installed version: [%s])." " Please upgrade python-gitlab to version 2.3.0 or above." % gitlab_version) - gitlab_instance = gitlab_authentication(module) this_gitlab = GitlabBranch(module=module, project=project, gitlab_instance=gitlab_instance) this_branch = this_gitlab.get_branch(branch) diff --git a/plugins/modules/gitlab_deploy_key.py b/plugins/modules/gitlab_deploy_key.py index f4a9fb29fa..9252341863 100644 --- a/plugins/modules/gitlab_deploy_key.py +++ b/plugins/modules/gitlab_deploy_key.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) # Copyright (c) 2018, Marcus Watkins @@ -8,28 +7,33 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: gitlab_deploy_key short_description: Manages GitLab project deploy keys description: - - Adds, updates and removes project deploy keys + - Adds, updates and removes project deploy keys. author: - Marcus Watkins (@marwatk) - Guillaume Martinez (@Lunik) requirements: - - python >= 2.7 - python-gitlab python module extends_documentation_fragment: - community.general.auth_basic - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: project: description: - - Id or Full path of project in the form of group/name. 
+ - ID or Full path of project in the form of group/name. required: true type: str title: @@ -39,7 +43,7 @@ options: type: str key: description: - - Deploy key + - Deploy key. required: true type: str can_push: @@ -49,14 +53,14 @@ options: default: false state: description: - - When C(present) the deploy key added to the project if it doesn't exist. - - When C(absent) it will be removed from the project if it exists. + - When V(present) the deploy key is added to the project if it does not exist. + - When V(absent) it is removed from the project if it exists. default: present type: str - choices: [ "present", "absent" ] -''' + choices: ["present", "absent"] +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: "Adding a project deploy key" community.general.gitlab_deploy_key: api_url: https://gitlab.example.com/ @@ -82,39 +86,38 @@ EXAMPLES = ''' project: "my_group/my_project" state: absent key: "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAIEAiPWx6WM4lhHNedGfBpPJNPpZ7yKu+dnn1SJejgt4596k6YjzGGphH2TUxwKzxcKDKKezwkpfnxPkSMkuEspGRt/aZZ9w..." +""" -''' - -RETURN = ''' +RETURN = r""" msg: - description: Success or failure message + description: Success or failure message. returned: always type: str sample: "Success" result: - description: json parsed response from the server + description: JSON-parsed response from the server. returned: always type: dict error: - description: the error message returned by the GitLab API + description: The error message returned by the GitLab API. returned: failed type: str sample: "400: key is already in use" deploy_key: - description: API object + description: API object. 
returned: always type: dict -''' +""" from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, find_project, gitlab_authentication, gitlab, ensure_gitlab_package + auth_argument_spec, find_project, gitlab_authentication, gitlab, list_all_kwargs ) @@ -190,9 +193,9 @@ class GitLabDeployKey(object): changed = False for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - if getattr(deploy_key, arg_key) != arguments[arg_key]: - setattr(deploy_key, arg_key, arguments[arg_key]) + if arg_value is not None: + if getattr(deploy_key, arg_key) != arg_value: + setattr(deploy_key, arg_key, arg_value) changed = True return (changed, deploy_key) @@ -202,9 +205,8 @@ class GitLabDeployKey(object): @param key_title Title of the key ''' def find_deploy_key(self, project, key_title): - deploy_keys = project.keys.list(all=True) - for deploy_key in deploy_keys: - if (deploy_key.title == key_title): + for deploy_key in project.keys.list(**list_all_kwargs): + if deploy_key.title == key_title: return deploy_key ''' @@ -254,7 +256,9 @@ def main(): ], supports_check_mode=True, ) - ensure_gitlab_package(module) + + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) state = module.params['state'] project_identifier = module.params['project'] @@ -262,8 +266,6 @@ def main(): key_keyfile = module.params['key'] key_can_push = module.params['can_push'] - gitlab_instance = gitlab_authentication(module) - gitlab_deploy_key = GitLabDeployKey(module, gitlab_instance) project = find_project(gitlab_instance, project_identifier) diff --git a/plugins/modules/gitlab_group.py b/plugins/modules/gitlab_group.py index d099a0c274..6356ce2e2c 100644 --- a/plugins/modules/gitlab_group.py +++ 
b/plugins/modules/gitlab_group.py @@ -1,100 +1,185 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) # Copyright (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: gitlab_group short_description: Creates/updates/deletes GitLab Groups description: - - When the group does not exist in GitLab, it will be created. - - When the group does exist and state=absent, the group will be deleted. + - When the group does not exist in GitLab, it is created. + - When the group does exist and O(state=absent), the group is deleted. author: - Werner Dijkerman (@dj-wasabi) - Guillaume Martinez (@Lunik) requirements: - - python >= 2.7 - python-gitlab python module extends_documentation_fragment: - community.general.auth_basic - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - Name of the group you want to create. - required: true - type: str - path: - description: - - The path of the group you want to create, this will be api_url/group_path - - If not supplied, the group_name will be used. - type: str - description: - description: - - A description for the group. - type: str - state: - description: - - create or delete group. - - Possible values are present and absent. 
- default: present - type: str - choices: ["present", "absent"] - parent: - description: - - Allow to create subgroups - - Id or Full path of parent group in the form of group/name - type: str - visibility: - description: - - Default visibility of the group - choices: ["private", "internal", "public"] - default: private - type: str - project_creation_level: - description: - - Determine if developers can create projects in the group. - choices: ["developer", "maintainer", "noone"] - type: str - version_added: 3.7.0 auto_devops_enabled: description: - Default to Auto DevOps pipeline for all projects within this group. type: bool version_added: 3.7.0 - subgroup_creation_level: - description: - - Allowed to create subgroups. - choices: ["maintainer", "owner"] - type: str - version_added: 3.7.0 - require_two_factor_authentication: - description: - - Require all users in this group to setup two-factor authentication. - type: bool - version_added: 3.7.0 avatar_path: description: - Absolute path image to configure avatar. File size should not exceed 200 kb. - This option is only used on creation, not for updates. type: path version_added: 4.2.0 -''' + default_branch: + description: + - All merge requests and commits are made against this branch unless you specify a different one. + type: str + version_added: 9.5.0 + description: + description: + - A description for the group. + type: str + enabled_git_access_protocol: + description: + - V(all) means SSH and HTTP(S) is enabled. + - V(ssh) means only SSH is enabled. + - V(http) means only HTTP(S) is enabled. + - Only available for top level groups. + choices: ["all", "ssh", "http"] + type: str + version_added: 9.5.0 + force_delete: + description: + - Force delete group even if projects in it. + - Used only when O(state=absent). + type: bool + default: false + version_added: 7.5.0 + lfs_enabled: + description: + - Projects in this group can use Git LFS. 
+ type: bool + version_added: 9.5.0 + lock_duo_features_enabled: + description: + - Enforce GitLab Duo features for all subgroups. + - Only available for top level groups. + type: bool + version_added: 9.5.0 + membership_lock: + description: + - Users cannot be added to projects in this group. + type: bool + version_added: 9.5.0 + mentions_disabled: + description: + - Group mentions are disabled. + type: bool + version_added: 9.5.0 + name: + description: + - Name of the group you want to create. + required: true + type: str + parent: + description: + - Allow to create subgroups. + - ID or Full path of parent group in the form of group/name. + type: str + path: + description: + - The path of the group you want to create, this is O(api_url)/O(path). + - If not supplied, O(name) is used. + type: str + prevent_forking_outside_group: + description: + - Prevent forking outside of the group. + type: bool + version_added: 9.5.0 + prevent_sharing_groups_outside_hierarchy: + description: + - Members cannot invite groups outside of this group and its subgroups. + - Only available for top level groups. + type: bool + version_added: 9.5.0 + project_creation_level: + description: + - Determine if developers can create projects in the group. + choices: ["developer", "maintainer", "noone"] + type: str + version_added: 3.7.0 + request_access_enabled: + description: + - Users can request access (if visibility is public or internal). + type: bool + version_added: 9.5.0 + service_access_tokens_expiration_enforced: + description: + - Service account token expiration. + - Changes do not affect existing token expiration dates. + - Only available for top level groups. + type: bool + version_added: 9.5.0 + share_with_group_lock: + description: + - Projects cannot be shared with other groups. + type: bool + version_added: 9.5.0 + require_two_factor_authentication: + description: + - Require all users in this group to setup two-factor authentication. 
+ type: bool + version_added: 3.7.0 + state: + description: + - Create or delete group. + - Possible values are present and absent. + default: present + type: str + choices: ["present", "absent"] + subgroup_creation_level: + description: + - Allowed to create subgroups. + choices: ["maintainer", "owner"] + type: str + version_added: 3.7.0 + two_factor_grace_period: + description: + - Delay 2FA enforcement (hours). + type: str + version_added: 9.5.0 + visibility: + description: + - Default visibility of the group. + choices: ["private", "internal", "public"] + default: private + type: str + wiki_access_level: + description: + - V(enabled) means everyone can access the wiki. + - V(private) means only members of this group can access the wiki. + - V(disabled) means group-level wiki is disabled. + choices: ["enabled", "private", "disabled"] + type: str + version_added: 9.5.0 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: "Delete GitLab Group" community.general.gitlab_group: api_url: https://gitlab.example.com/ api_token: "{{ access_token }}" - validate_certs: false name: my_first_group state: absent @@ -133,38 +218,38 @@ EXAMPLES = ''' project_creation_level: noone auto_devops_enabled: false subgroup_creation_level: maintainer -''' +""" -RETURN = ''' +RETURN = r""" msg: - description: Success or failure message + description: Success or failure message. returned: always type: str sample: "Success" result: - description: json parsed response from the server + description: JSON-parsed response from the server. returned: always type: dict error: - description: the error message returned by the GitLab API + description: The error message returned by the GitLab API. returned: failed type: str sample: "400: path is already in use" group: - description: API object + description: API object. 
returned: always type: dict -''' +""" from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, find_group, gitlab_authentication, gitlab, ensure_gitlab_package + auth_argument_spec, find_group, gitlab_authentication, gitlab ) @@ -190,23 +275,38 @@ class GitLabGroup(object): def create_or_update_group(self, name, parent, options): changed = False + payload = { + 'auto_devops_enabled': options['auto_devops_enabled'], + 'default_branch': options['default_branch'], + 'description': options['description'], + 'lfs_enabled': options['lfs_enabled'], + 'membership_lock': options['membership_lock'], + 'mentions_disabled': options['mentions_disabled'], + 'name': name, + 'path': options['path'], + 'prevent_forking_outside_group': options['prevent_forking_outside_group'], + 'project_creation_level': options['project_creation_level'], + 'request_access_enabled': options['request_access_enabled'], + 'require_two_factor_authentication': options['require_two_factor_authentication'], + 'share_with_group_lock': options['share_with_group_lock'], + 'subgroup_creation_level': options['subgroup_creation_level'], + 'visibility': options['visibility'], + 'wiki_access_level': options['wiki_access_level'], + } + if options.get('enabled_git_access_protocol') and parent is None: + payload['enabled_git_access_protocol'] = options['enabled_git_access_protocol'] + if options.get('lock_duo_features_enabled') and parent is None: + payload['lock_duo_features_enabled'] = options['lock_duo_features_enabled'] + if options.get('prevent_sharing_groups_outside_hierarchy') and parent is None: + payload['prevent_sharing_groups_outside_hierarchy'] = options['prevent_sharing_groups_outside_hierarchy'] + if options.get('service_access_tokens_expiration_enforced') and parent is None: + 
payload['service_access_tokens_expiration_enforced'] = options['service_access_tokens_expiration_enforced'] + if options.get('two_factor_grace_period'): + payload['two_factor_grace_period'] = int(options['two_factor_grace_period']) + # Because we have already call userExists in main() if self.group_object is None: - parent_id = self.get_group_id(parent) - - payload = { - 'name': name, - 'path': options['path'], - 'parent_id': parent_id, - 'visibility': options['visibility'], - 'project_creation_level': options['project_creation_level'], - 'auto_devops_enabled': options['auto_devops_enabled'], - 'subgroup_creation_level': options['subgroup_creation_level'], - } - if options.get('description'): - payload['description'] = options['description'] - if options.get('require_two_factor_authentication'): - payload['require_two_factor_authentication'] = options['require_two_factor_authentication'] + payload['parent_id'] = self.get_group_id(parent) group = self.create_group(payload) # add avatar to group @@ -217,15 +317,7 @@ class GitLabGroup(object): self._module.fail_json(msg='Cannot open {0}: {1}'.format(options['avatar_path'], e)) changed = True else: - changed, group = self.update_group(self.group_object, { - 'name': name, - 'description': options['description'], - 'visibility': options['visibility'], - 'project_creation_level': options['project_creation_level'], - 'auto_devops_enabled': options['auto_devops_enabled'], - 'subgroup_creation_level': options['subgroup_creation_level'], - 'require_two_factor_authentication': options['require_two_factor_authentication'], - }) + changed, group = self.update_group(self.group_object, payload) self.group_object = group if changed: @@ -248,7 +340,10 @@ class GitLabGroup(object): return True try: - group = self._gitlab.groups.create(arguments) + # Filter out None values + filtered = {arg_key: arg_value for arg_key, arg_value in arguments.items() if arg_value is not None} + + group = self._gitlab.groups.create(filtered) except 
(gitlab.exceptions.GitlabCreateError) as e: self._module.fail_json(msg="Failed to create group: %s " % to_native(e)) @@ -262,19 +357,25 @@ class GitLabGroup(object): changed = False for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - if getattr(group, arg_key) != arguments[arg_key]: - setattr(group, arg_key, arguments[arg_key]) + if arg_value is not None: + if getattr(group, arg_key) != arg_value: + setattr(group, arg_key, arg_value) changed = True return (changed, group) - def delete_group(self): + ''' + @param force To delete even if projects inside + ''' + def delete_group(self, force=False): group = self.group_object - if len(group.projects.list(all=False)) >= 1: + if not force and len(group.projects.list(all=False)) >= 1: self._module.fail_json( - msg="There are still projects in this group. These needs to be moved or deleted before this group can be removed.") + msg=("There are still projects in this group. " + "These needs to be moved or deleted before this group can be removed. " + "Use 'force_delete' to 'true' to force deletion of existing projects.") + ) else: if self._module.check_mode: return True @@ -285,7 +386,7 @@ class GitLabGroup(object): self._module.fail_json(msg="Failed to delete group: %s " % to_native(e)) ''' - @param name Name of the groupe + @param name Name of the group @param full_path Complete path of the Group including parent group path. 
/ ''' def exists_group(self, project_identifier): @@ -301,27 +402,41 @@ def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) argument_spec.update(dict( - name=dict(type='str', required=True), - path=dict(type='str'), - description=dict(type='str'), - state=dict(type='str', default="present", choices=["absent", "present"]), - parent=dict(type='str'), - visibility=dict(type='str', default="private", choices=["internal", "private", "public"]), - project_creation_level=dict(type='str', choices=['developer', 'maintainer', 'noone']), auto_devops_enabled=dict(type='bool'), - subgroup_creation_level=dict(type='str', choices=['maintainer', 'owner']), - require_two_factor_authentication=dict(type='bool'), avatar_path=dict(type='path'), + default_branch=dict(type='str'), + description=dict(type='str'), + enabled_git_access_protocol=dict(type='str', choices=['all', 'ssh', 'http']), + force_delete=dict(type='bool', default=False), + lfs_enabled=dict(type='bool'), + lock_duo_features_enabled=dict(type='bool'), + membership_lock=dict(type='bool'), + mentions_disabled=dict(type='bool'), + name=dict(type='str', required=True), + parent=dict(type='str'), + path=dict(type='str'), + prevent_forking_outside_group=dict(type='bool'), + prevent_sharing_groups_outside_hierarchy=dict(type='bool'), + project_creation_level=dict(type='str', choices=['developer', 'maintainer', 'noone']), + request_access_enabled=dict(type='bool'), + require_two_factor_authentication=dict(type='bool'), + service_access_tokens_expiration_enforced=dict(type='bool'), + share_with_group_lock=dict(type='bool'), + state=dict(type='str', default="present", choices=["absent", "present"]), + subgroup_creation_level=dict(type='str', choices=['maintainer', 'owner']), + two_factor_grace_period=dict(type='str'), + visibility=dict(type='str', default="private", choices=["internal", "private", "public"]), + wiki_access_level=dict(type='str', choices=['enabled', 'private', 
'disabled']), )) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=[ - ['api_username', 'api_token'], - ['api_username', 'api_oauth_token'], - ['api_username', 'api_job_token'], - ['api_token', 'api_oauth_token'], ['api_token', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_token'], ], required_together=[ ['api_username', 'api_password'], @@ -331,21 +446,35 @@ def main(): ], supports_check_mode=True, ) - ensure_gitlab_package(module) + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) + + auto_devops_enabled = module.params['auto_devops_enabled'] + avatar_path = module.params['avatar_path'] + default_branch = module.params['default_branch'] + description = module.params['description'] + enabled_git_access_protocol = module.params['enabled_git_access_protocol'] + force_delete = module.params['force_delete'] group_name = module.params['name'] group_path = module.params['path'] - description = module.params['description'] - state = module.params['state'] - parent_identifier = module.params['parent'] group_visibility = module.params['visibility'] + lfs_enabled = module.params['lfs_enabled'] + lock_duo_features_enabled = module.params['lock_duo_features_enabled'] + membership_lock = module.params['membership_lock'] + mentions_disabled = module.params['mentions_disabled'] + parent_identifier = module.params['parent'] + prevent_forking_outside_group = module.params['prevent_forking_outside_group'] + prevent_sharing_groups_outside_hierarchy = module.params['prevent_sharing_groups_outside_hierarchy'] project_creation_level = module.params['project_creation_level'] - auto_devops_enabled = module.params['auto_devops_enabled'] - subgroup_creation_level = module.params['subgroup_creation_level'] + request_access_enabled = module.params['request_access_enabled'] require_two_factor_authentication = 
module.params['require_two_factor_authentication'] - avatar_path = module.params['avatar_path'] - - gitlab_instance = gitlab_authentication(module) + service_access_tokens_expiration_enforced = module.params['service_access_tokens_expiration_enforced'] + share_with_group_lock = module.params['share_with_group_lock'] + state = module.params['state'] + subgroup_creation_level = module.params['subgroup_creation_level'] + two_factor_grace_period = module.params['two_factor_grace_period'] + wiki_access_level = module.params['wiki_access_level'] # Define default group_path based on group_name if group_path is None: @@ -357,7 +486,7 @@ def main(): if parent_identifier: parent_group = find_group(gitlab_instance, parent_identifier) if not parent_group: - module.fail_json(msg="Failed create GitLab group: Parent group doesn't exists") + module.fail_json(msg="Failed to create GitLab group: Parent group doesn't exist") group_exists = gitlab_group.exists_group(parent_group.full_path + '/' + group_path) else: @@ -365,21 +494,34 @@ def main(): if state == 'absent': if group_exists: - gitlab_group.delete_group() + gitlab_group.delete_group(force=force_delete) module.exit_json(changed=True, msg="Successfully deleted group %s" % group_name) else: - module.exit_json(changed=False, msg="Group deleted or does not exists") + module.exit_json(changed=False, msg="Group deleted or does not exist") if state == 'present': if gitlab_group.create_or_update_group(group_name, parent_group, { - "path": group_path, - "description": description, - "visibility": group_visibility, - "project_creation_level": project_creation_level, "auto_devops_enabled": auto_devops_enabled, - "subgroup_creation_level": subgroup_creation_level, - "require_two_factor_authentication": require_two_factor_authentication, "avatar_path": avatar_path, + "default_branch": default_branch, + "description": description, + "enabled_git_access_protocol": enabled_git_access_protocol, + "lfs_enabled": lfs_enabled, + 
"lock_duo_features_enabled": lock_duo_features_enabled, + "membership_lock": membership_lock, + "mentions_disabled": mentions_disabled, + "path": group_path, + "prevent_forking_outside_group": prevent_forking_outside_group, + "prevent_sharing_groups_outside_hierarchy": prevent_sharing_groups_outside_hierarchy, + "project_creation_level": project_creation_level, + "request_access_enabled": request_access_enabled, + "require_two_factor_authentication": require_two_factor_authentication, + "service_access_tokens_expiration_enforced": service_access_tokens_expiration_enforced, + "share_with_group_lock": share_with_group_lock, + "subgroup_creation_level": subgroup_creation_level, + "two_factor_grace_period": two_factor_grace_period, + "visibility": group_visibility, + "wiki_access_level": wiki_access_level, }): module.exit_json(changed=True, msg="Successfully created or updated the group %s" % group_name, group=gitlab_group.group_object._attrs) else: diff --git a/plugins/modules/gitlab_group_access_token.py b/plugins/modules/gitlab_group_access_token.py new file mode 100644 index 0000000000..59afc74bea --- /dev/null +++ b/plugins/modules/gitlab_group_access_token.py @@ -0,0 +1,339 @@ +#!/usr/bin/python + +# Copyright (c) 2024, Zoran Krleza (zoran.krleza@true-north.hr) +# Based on code: +# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# Copyright (c) 2018, Marcus Watkins +# Copyright (c) 2013, Phillip Gentry +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: gitlab_group_access_token +short_description: Manages GitLab group access tokens +version_added: 8.4.0 +description: + - Creates and revokes group access tokens. 
+author: + - Zoran Krleza (@pixslx) +requirements: + - python-gitlab >= 3.1.0 +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes +notes: + - Access tokens can not be changed. If a parameter needs to be changed, an acceess token has to be recreated. Whether tokens + are recreated or not is controlled by the O(recreate) option, which defaults to V(never). + - Token string is contained in the result only when access token is created or recreated. It can not be fetched afterwards. + - Token matching is done by comparing O(name) option. +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + group: + description: + - ID or full path of group in the form of group/subgroup. + required: true + type: str + name: + description: + - Access token's name. + required: true + type: str + scopes: + description: + - Scope of the access token. + - The values V(read_virtual_registry), V(write_virtual_registry), V(manage_runner), and V(self_rotate) were added in community.general 11.3.0. + required: true + type: list + elements: str + aliases: ["scope"] + choices: + - api + - read_api + - read_registry + - write_registry + - read_virtual_registry + - write_virtual_registry + - read_repository + - write_repository + - create_runner + - manage_runner + - ai_features + - k8s_proxy + - self_rotate + access_level: + description: + - Access level of the access token. + - The value V(planner) was added in community.general 11.3.0. + type: str + default: maintainer + choices: ["guest", "planner", "reporter", "developer", "maintainer", "owner"] + expires_at: + description: + - Expiration date of the access token in C(YYYY-MM-DD) format. + - Make sure to quote this value in YAML to ensure it is kept as a string and not interpreted as a YAML date. + type: str + required: true + recreate: + description: + - Whether the access token is recreated if it already exists. 
+ - When V(never) the token is never recreated. + - When V(always) the token is always recreated. + - When V(state_change) the token is recreated if there is a difference between desired state and actual state. + type: str + choices: ["never", "always", "state_change"] + default: never + state: + description: + - When V(present) the access token is added to the group if it does not exist. + - When V(absent) it is removed from the group if it exists. + default: present + type: str + choices: ["present", "absent"] +""" + +EXAMPLES = r""" +- name: "Creating a group access token" + community.general.gitlab_group_access_token: + api_url: https://gitlab.example.com/ + api_token: "somegitlabapitoken" + group: "my_group/my_subgroup" + name: "group_token" + expires_at: "2024-12-31" + access_level: developer + scopes: + - api + - read_api + - read_repository + - write_repository + state: present + +- name: "Revoking a group access token" + community.general.gitlab_group_access_token: + api_url: https://gitlab.example.com/ + api_token: "somegitlabapitoken" + group: "my_group/my_group" + name: "group_token" + expires_at: "2024-12-31" + scopes: + - api + - read_api + - read_repository + - write_repository + state: absent + +- name: "Change (recreate) existing token if its actual state is different than desired state" + community.general.gitlab_group_access_token: + api_url: https://gitlab.example.com/ + api_token: "somegitlabapitoken" + group: "my_group/my_group" + name: "group_token" + expires_at: "2024-12-31" + scopes: + - api + - read_api + - read_repository + - write_repository + recreate: state_change + state: present +""" + +RETURN = r""" +access_token: + description: + - API object. + - Only contains the value of the token if the token was created or recreated. 
+ returned: success and O(state=present) + type: dict +""" + +from datetime import datetime + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, find_group, gitlab_authentication, gitlab +) + +ACCESS_LEVELS = dict(guest=10, planner=15, reporter=20, developer=30, maintainer=40, owner=50) + + +class GitLabGroupAccessToken(object): + def __init__(self, module, gitlab_instance): + self._module = module + self._gitlab = gitlab_instance + self.access_token_object = None + + ''' + @param project Project Object + @param group Group Object + @param arguments Attributes of the access_token + ''' + def create_access_token(self, group, arguments): + changed = False + if self._module.check_mode: + return True + + try: + self.access_token_object = group.access_tokens.create(arguments) + changed = True + except (gitlab.exceptions.GitlabCreateError) as e: + self._module.fail_json(msg="Failed to create access token: %s " % to_native(e)) + + return changed + + ''' + @param project Project object + @param group Group Object + @param name of the access token + ''' + def find_access_token(self, group, name): + access_tokens = [x for x in group.access_tokens.list(all=True) if not getattr(x, 'revoked', False)] + for access_token in access_tokens: + if access_token.name == name: + self.access_token_object = access_token + return False + return False + + def revoke_access_token(self): + if self._module.check_mode: + return True + + changed = False + try: + self.access_token_object.delete() + changed = True + except (gitlab.exceptions.GitlabCreateError) as e: + self._module.fail_json(msg="Failed to revoke access token: %s " % to_native(e)) + + return changed + + def access_tokens_equal(self): + if self.access_token_object.name != 
self._module.params['name']: + return False + if self.access_token_object.scopes != self._module.params['scopes']: + return False + if self.access_token_object.access_level != ACCESS_LEVELS[self._module.params['access_level']]: + return False + if self.access_token_object.expires_at != self._module.params['expires_at']: + return False + return True + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update(dict( + state=dict(type='str', default="present", choices=["absent", "present"]), + group=dict(type='str', required=True), + name=dict(type='str', required=True), + scopes=dict(type='list', + required=True, + aliases=['scope'], + elements='str', + choices=['api', + 'read_api', + 'read_registry', + 'write_registry', + 'read_virtual_registry', + 'write_virtual_registry', + 'read_repository', + 'write_repository', + 'create_runner', + 'manage_runner', + 'ai_features', + 'k8s_proxy', + 'self_rotate']), + access_level=dict(type='str', default='maintainer', choices=['guest', 'planner', 'reporter', 'developer', 'maintainer', 'owner']), + expires_at=dict(type='str', required=True), + recreate=dict(type='str', default='never', choices=['never', 'always', 'state_change']) + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'] + ], + required_together=[ + ['api_username', 'api_password'] + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ], + supports_check_mode=True + ) + + state = module.params['state'] + group_identifier = module.params['group'] + name = module.params['name'] + scopes = module.params['scopes'] + access_level_str = module.params['access_level'] + expires_at = module.params['expires_at'] + recreate = module.params['recreate'] + + 
access_level = ACCESS_LEVELS[access_level_str] + + try: + datetime.strptime(expires_at, '%Y-%m-%d') + except ValueError: + module.fail_json(msg="Argument expires_at is not in required format YYYY-MM-DD") + + gitlab_instance = gitlab_authentication(module) + + gitlab_access_token = GitLabGroupAccessToken(module, gitlab_instance) + + group = find_group(gitlab_instance, group_identifier) + if group is None: + module.fail_json(msg="Failed to create access token: group %s does not exists" % group_identifier) + + gitlab_access_token_exists = False + gitlab_access_token.find_access_token(group, name) + if gitlab_access_token.access_token_object is not None: + gitlab_access_token_exists = True + + if state == 'absent': + if gitlab_access_token_exists: + gitlab_access_token.revoke_access_token() + module.exit_json(changed=True, msg="Successfully deleted access token %s" % name) + else: + module.exit_json(changed=False, msg="Access token does not exists") + + if state == 'present': + if gitlab_access_token_exists: + if gitlab_access_token.access_tokens_equal(): + if recreate == 'always': + gitlab_access_token.revoke_access_token() + gitlab_access_token.create_access_token(group, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at}) + module.exit_json(changed=True, msg="Successfully recreated access token", access_token=gitlab_access_token.access_token_object._attrs) + else: + module.exit_json(changed=False, msg="Access token already exists", access_token=gitlab_access_token.access_token_object._attrs) + else: + if recreate == 'never': + module.fail_json(msg="Access token already exists and its state is different. 
It can not be updated without recreating.") + else: + gitlab_access_token.revoke_access_token() + gitlab_access_token.create_access_token(group, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at}) + module.exit_json(changed=True, msg="Successfully recreated access token", access_token=gitlab_access_token.access_token_object._attrs) + else: + gitlab_access_token.create_access_token(group, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at}) + if module.check_mode: + module.exit_json(changed=True, msg="Successfully created access token", access_token={}) + else: + module.exit_json(changed=True, msg="Successfully created access token", access_token=gitlab_access_token.access_token_object._attrs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gitlab_group_members.py b/plugins/modules/gitlab_group_members.py index 6edc8c983f..b101cb4e43 100644 --- a/plugins/modules/gitlab_group_members.py +++ b/plugins/modules/gitlab_group_members.py @@ -1,15 +1,12 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2020, Zainab Alsaffar # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: gitlab_group_members short_description: Manage group members on GitLab Server description: @@ -22,6 +19,13 @@ requirements: extends_documentation_fragment: - community.general.auth_basic - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: gitlab_group: @@ -33,22 +37,22 @@ options: gitlab_user: description: - A username or a list of usernames to add to/remove from the GitLab group. 
- - Mutually exclusive with I(gitlab_users_access). + - Mutually exclusive with O(gitlab_users_access). type: list elements: str access_level: description: - The access level for the user. - - Required if I(state=present), user state is set to present. - - Mutually exclusive with I(gitlab_users_access). + - Required if O(state=present), user state is set to present. + - Mutually exclusive with O(gitlab_users_access). type: str choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] gitlab_users_access: description: - Provide a list of user to access level mappings. - Every dictionary in this list specifies a user (by username) and the access level the user should have. - - Mutually exclusive with I(gitlab_user) and I(access_level). - - Use together with I(purge_users) to remove all users not specified here from the group. + - Mutually exclusive with O(gitlab_user) and O(access_level). + - Use together with O(purge_users) to remove all users not specified here from the group. type: list elements: dict suboptions: @@ -59,7 +63,7 @@ options: access_level: description: - The access level for the user. - - Required if I(state=present), user state is set to present. + - Required if O(state=present), user state is set to present. type: str choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] required: true @@ -67,25 +71,23 @@ options: state: description: - State of the member in the group. - - On C(present), it adds a user to a GitLab group. - - On C(absent), it removes a user from a GitLab group. + - On V(present), it adds a user to a GitLab group. + - On V(absent), it removes a user from a GitLab group. choices: ['present', 'absent'] default: 'present' type: str purge_users: description: - - Adds/remove users of the given access_level to match the given I(gitlab_user)/I(gitlab_users_access) list. - If omitted do not purge orphaned members. - - Is only used when I(state=present). 
+ - Adds/remove users of the given access_level to match the given O(gitlab_user)/O(gitlab_users_access) list. If omitted + do not purge orphaned members. + - Is only used when O(state=present). type: list elements: str choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] version_added: 3.6.0 -notes: - - Supports C(check_mode). -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Add a user to a GitLab Group community.general.gitlab_group_members: api_url: 'https://gitlab.example.com' @@ -147,15 +149,15 @@ EXAMPLES = r''' - name: user2 access_level: maintainer state: absent -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, gitlab_authentication, gitlab, ensure_gitlab_package + auth_argument_spec, gitlab_authentication, gitlab, list_all_kwargs ) @@ -166,16 +168,20 @@ class GitLabGroup(object): # get user id if the user exists def get_user_id(self, gitlab_user): - user_exists = self._gitlab.users.list(username=gitlab_user, all=True) - if user_exists: - return user_exists[0].id + return next( + (u.id for u in self._gitlab.users.list(username=gitlab_user, **list_all_kwargs)), + None + ) # get group id if group exists def get_group_id(self, gitlab_group): - groups = self._gitlab.groups.list(search=gitlab_group, all=True) - for group in groups: - if group.full_path == gitlab_group: - return group.id + return next( + ( + g.id for g in self._gitlab.groups.list(search=gitlab_group, **list_all_kwargs) + if g.full_path == gitlab_group + ), + None + ) # get all members in a group def get_members_in_a_group(self, gitlab_group_id): @@ -268,14 +274,16 @@ def main(): ], supports_check_mode=True, ) - ensure_gitlab_package(module) + + # check prerequisites and connect to gitlab server + gl = gitlab_authentication(module) access_level_int = { - 'guest': 
gitlab.GUEST_ACCESS, - 'reporter': gitlab.REPORTER_ACCESS, - 'developer': gitlab.DEVELOPER_ACCESS, - 'maintainer': gitlab.MAINTAINER_ACCESS, - 'owner': gitlab.OWNER_ACCESS, + 'guest': gitlab.const.GUEST_ACCESS, + 'reporter': gitlab.const.REPORTER_ACCESS, + 'developer': gitlab.const.DEVELOPER_ACCESS, + 'maintainer': gitlab.const.MAINTAINER_ACCESS, + 'owner': gitlab.const.OWNER_ACCESS, } gitlab_group = module.params['gitlab_group'] @@ -286,9 +294,6 @@ def main(): if purge_users: purge_users = [access_level_int[level] for level in purge_users] - # connect to gitlab server - gl = gitlab_authentication(module) - group = GitLabGroup(module, gl) gitlab_group_id = group.get_group_id(gitlab_group) diff --git a/plugins/modules/gitlab_group_variable.py b/plugins/modules/gitlab_group_variable.py index 4a185b2394..c505547d87 100644 --- a/plugins/modules/gitlab_group_variable.py +++ b/plugins/modules/gitlab_group_variable.py @@ -1,31 +1,36 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2020, Florent Madiot (scodeman@scode.io) # Based on code: # Copyright (c) 2019, Markus Bergholz (markuman@gmail.com) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: gitlab_group_variable short_description: Creates, updates, or deletes GitLab groups variables version_added: 1.2.0 description: - Creates a group variable if it does not exist. - - When a group variable does exist, its value will be updated when the values are different. - - Variables which are untouched in the playbook, but are not untouched in the GitLab group, - they stay untouched (I(purge) is C(false)) or will be deleted (I(purge) is C(true)). 
+ - When a group variable does exist and is not hidden, its value is updated when the values are different. + When a group variable does exist and is hidden, its value is updated. In this case, the module is B(not idempotent). + - Variables which are untouched in the playbook, but are not untouched in the GitLab group, they stay untouched (O(purge=false)) + or are deleted (O(purge=true)). author: - Florent Madiot (@scodeman) requirements: - - python >= 2.7 - python-gitlab python module extends_documentation_fragment: - community.general.auth_basic - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: state: @@ -41,29 +46,31 @@ options: type: str purge: description: - - When set to C(true), delete all variables which are not untouched in the task. + - When set to V(true), delete all variables which are not untouched in the task. default: false type: bool vars: description: - - When the list element is a simple key-value pair, set masked and protected to false. - - When the list element is a dict with the keys I(value), I(masked) and I(protected), the user can - have full control about whether a value should be masked, protected or both. + - When the list element is a simple key-value pair, C(masked), C(hidden), C(raw), and C(protected) are set to V(false). + - When the list element is a dict with the keys C(value), C(masked), C(hidden), C(raw), and C(protected), the user can have full + control about whether a value should be masked, hidden, raw, protected, or a combination. - Support for group variables requires GitLab >= 9.5. - Support for environment_scope requires GitLab Premium >= 13.11. - Support for protected values requires GitLab >= 9.3. - Support for masked values requires GitLab >= 11.10. - - A I(value) must be a string or a number. - - Field I(variable_type) must be a string with either C(env_var), which is the default, or C(file). 
- - When a value is masked, it must be in Base64 and have a length of at least 8 characters. - See GitLab documentation on acceptable values for a masked variable (U(https://docs.gitlab.com/ce/ci/variables/#masked-variables)). + - Support for hidden values requires GitLab >= 17.4, and was added in community.general 11.3.0. + - Support for raw values requires GitLab >= 15.7. + - A C(value) must be a string or a number. + - Field C(variable_type) must be a string with either V(env_var), which is the default, or V(file). + - When a value is masked, it must be in Base64 and have a length of at least 8 characters. See GitLab documentation + on acceptable values for a masked variable (U(https://docs.gitlab.com/ce/ci/variables/#masked-variables)). default: {} type: dict variables: version_added: 4.5.0 description: - A list of dictionaries that represents CI/CD variables. - - This modules works internal with this sructure, even if the older I(vars) parameter is used. + - This modules works internal with this structure, even if the older O(vars) parameter is used. default: [] type: list elements: dict @@ -76,35 +83,54 @@ options: value: description: - The variable value. - - Required when I(state=present). + - Required when O(state=present). type: str + description: + description: + - A description for the variable. + - Support for descriptions requires GitLab >= 16.2. + type: str + version_added: '11.4.0' masked: description: - - Wether variable value is masked or not. + - Whether variable value is masked or not. type: bool default: false + hidden: + description: + - Whether variable value is hidden or not. + - Implies C(masked). + - Support for hidden values requires GitLab >= 17.4. + type: bool + default: false + version_added: '11.3.0' protected: description: - - Wether variable value is protected or not. + - Whether variable value is protected or not. type: bool default: false + raw: + description: + - Whether variable value is raw or not. 
+ - Support for raw values requires GitLab >= 15.7. + type: bool + default: false + version_added: '7.4.0' variable_type: description: - - Wether a variable is an environment variable (C(env_var)) or a file (C(file)). + - Whether a variable is an environment variable (V(env_var)) or a file (V(file)). type: str - choices: [ "env_var", "file" ] + choices: ["env_var", "file"] default: env_var environment_scope: description: - The scope for the variable. type: str default: '*' -notes: -- Supports I(check_mode). -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Set or update some CI/CD variables community.general.gitlab_group_variable: api_url: https://gitlab.com @@ -121,6 +147,38 @@ EXAMPLES = r''' variable_type: env_var environment_scope: production +- name: Set or update some CI/CD variables with raw value + community.general.gitlab_group_variable: + api_url: https://gitlab.com + api_token: secret_access_token + group: scodeman/testgroup/ + purge: false + vars: + ACCESS_KEY_ID: abc123 + SECRET_ACCESS_KEY: + value: 3214cbad + masked: true + protected: true + raw: true + variable_type: env_var + environment_scope: '*' + +- name: Set or update some CI/CD variables with expandable value + community.general.gitlab_group_variable: + api_url: https://gitlab.com + api_token: secret_access_token + group: scodeman/testgroup/ + purge: false + vars: + ACCESS_KEY_ID: abc123 + SECRET_ACCESS_KEY: + value: '$MY_OTHER_VARIABLE' + masked: true + protected: true + raw: false + variable_type: env_var + environment_scope: '*' + - name: Delete one variable community.general.gitlab_group_variable: api_url: https://gitlab.com @@ -129,9 +187,9 @@ EXAMPLES = r''' state: absent vars: ACCESS_KEY_ID: abc123 -''' +""" -RETURN = r''' +RETURN = r""" group_variable: description: Four lists of the variablenames which were added, updated, removed or exist. returned: always @@ -141,72 +199,32 @@ group_variable: description: A list of variables which were created. 
returned: always type: list - sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY'] + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] untouched: description: A list of variables which exist. returned: always type: list - sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY'] + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] removed: description: A list of variables which were deleted. returned: always type: list - sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY'] + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] updated: description: A list of variables whose values were changed. returned: always type: list - sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY'] -''' + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.six import string_types -from ansible.module_utils.six import integer_types - from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, gitlab_authentication, ensure_gitlab_package, filter_returned_variables + auth_argument_spec, gitlab_authentication, filter_returned_variables, vars_to_variables, + list_all_kwargs ) -def vars_to_variables(vars, module): - # transform old vars to new variables structure - variables = list() - for item, value in vars.items(): - if (isinstance(value, string_types) or - isinstance(value, (integer_types, float))): - variables.append( - { - "name": item, - "value": str(value), - "masked": False, - "protected": False, - "variable_type": "env_var", - } - ) - - elif isinstance(value, dict): - new_item = {"name": item, "value": value.get('value')} - - new_item = { - "name": item, - "value": value.get('value'), - "masked": value.get('masked'), - "protected": value.get('protected'), - "variable_type": value.get('variable_type'), - } - - if value.get('environment_scope'): - new_item['environment_scope'] = value.get('environment_scope') - - variables.append(new_item) - - 
else: - module.fail_json(msg="value must be of type string, integer, float or dict") - - return variables - - class GitlabGroupVariables(object): def __init__(self, module, gitlab_instance): @@ -218,14 +236,7 @@ class GitlabGroupVariables(object): return self.repo.groups.get(group_name) def list_all_group_variables(self): - page_nb = 1 - variables = [] - vars_page = self.group.variables.list(page=page_nb) - while len(vars_page) > 0: - variables += vars_page - page_nb += 1 - vars_page = self.group.variables.list(page=page_nb) - return variables + return list(self.group.variables.list(**list_all_kwargs)) def create_variable(self, var_obj): if self._module.check_mode: @@ -233,8 +244,11 @@ class GitlabGroupVariables(object): var = { "key": var_obj.get('key'), "value": var_obj.get('value'), + "description": var_obj.get('description'), "masked": var_obj.get('masked'), + "masked_and_hidden": var_obj.get('hidden'), "protected": var_obj.get('protected'), + "raw": var_obj.get('raw'), "variable_type": var_obj.get('variable_type'), } if var_obj.get('environment_scope') is not None: @@ -303,8 +317,12 @@ def native_python_main(this_gitlab, purge, requested_variables, state, module): item['value'] = str(item.get('value')) if item.get('protected') is None: item['protected'] = False + if item.get('raw') is None: + item['raw'] = False if item.get('masked') is None: item['masked'] = False + if item.get('hidden') is None: + item['hidden'] = False if item.get('environment_scope') is None: item['environment_scope'] = '*' if item.get('variable_type') is None: @@ -335,14 +353,13 @@ def native_python_main(this_gitlab, purge, requested_variables, state, module): return_value['removed'].append(item) elif state == 'absent': - # value does not matter on removing variables. 
- # key and environment scope are sufficient - for item in existing_variables: - item.pop('value') - item.pop('variable_type') - for item in requested_variables: - item.pop('value') - item.pop('variable_type') + # value, type, and description do not matter on removing variables. + keys_ignored_on_deletion = ['value', 'variable_type', 'description'] + for key in keys_ignored_on_deletion: + for item in existing_variables: + item.pop(key) + for item in requested_variables: + item.pop(key) if not purge: remove_requested = [x for x in requested_variables if x in existing_variables] @@ -372,13 +389,18 @@ def main(): argument_spec.update(auth_argument_spec()) argument_spec.update( group=dict(type='str', required=True), - purge=dict(type='bool', required=False, default=False), - vars=dict(type='dict', required=False, default=dict(), no_log=True), - variables=dict(type='list', elements='dict', required=False, default=list(), options=dict( + purge=dict(type='bool', default=False), + vars=dict(type='dict', default=dict(), no_log=True), + # please mind whenever changing the variables dict to also change module_utils/gitlab.py's + # KNOWN dict in filter_returned_variables or bad evil will happen + variables=dict(type='list', elements='dict', default=list(), options=dict( name=dict(type='str', required=True), value=dict(type='str', no_log=True), + description=dict(type='str'), masked=dict(type='bool', default=False), + hidden=dict(type='bool', default=False), protected=dict(type='bool', default=False), + raw=dict(type='bool', default=False), environment_scope=dict(type='str', default='*'), variable_type=dict(type='str', default='env_var', choices=["env_var", "file"]) )), @@ -403,7 +425,9 @@ def main(): ], supports_check_mode=True ) - ensure_gitlab_package(module) + + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) purge = module.params['purge'] var_list = module.params['vars'] @@ -418,8 +442,6 @@ def main(): if any(x['value'] 
is None for x in variables): module.fail_json(msg='value parameter is required in state present') - gitlab_instance = gitlab_authentication(module) - this_gitlab = GitlabGroupVariables(module=module, gitlab_instance=gitlab_instance) changed, raw_return_value, before, after = native_python_main(this_gitlab, purge, variables, state, module) diff --git a/plugins/modules/gitlab_hook.py b/plugins/modules/gitlab_hook.py index 70864207ed..46997c5f62 100644 --- a/plugins/modules/gitlab_hook.py +++ b/plugins/modules/gitlab_hook.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) # Copyright (c) 2018, Marcus Watkins @@ -8,43 +7,47 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: gitlab_hook short_description: Manages GitLab project hooks description: - - Adds, updates and removes project hook + - Adds, updates and removes project hook. author: - Marcus Watkins (@marwatk) - Guillaume Martinez (@Lunik) requirements: - - python >= 2.7 - python-gitlab python module extends_documentation_fragment: - community.general.auth_basic - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: project: description: - - Id or Full path of the project in the form of group/name. + - ID or Full path of the project in the form of group/name. required: true type: str hook_url: description: - - The url that you want GitLab to post to, this is used as the primary key for updates and deletion. + - The URL that you want GitLab to post to, this is used as the primary key for updates and deletion. 
required: true type: str state: description: - - When C(present) the hook will be updated to match the input or created if it doesn't exist. - - When C(absent) hook will be deleted if it exists. + - When V(present) the hook is updated to match the input or created if it does not exist. + - When V(absent) hook is deleted if it exists. default: present type: str - choices: [ "present", "absent" ] + choices: ["present", "absent"] push_events: description: - Trigger hook on push events. @@ -52,7 +55,7 @@ options: default: true push_events_branch_filter: description: - - Branch name of wildcard to trigger hook on push events + - Branch name of wildcard to trigger hook on push events. type: str version_added: '0.2.0' default: '' @@ -91,22 +94,27 @@ options: - Trigger hook on wiki events. type: bool default: false + releases_events: + description: + - Trigger hook on release events. + type: bool + version_added: '8.4.0' hook_validate_certs: description: - - Whether GitLab will do SSL verification when triggering the hook. + - Whether GitLab performs SSL verification when triggering the hook. type: bool default: false - aliases: [ enable_ssl_verification ] + aliases: [enable_ssl_verification] token: description: - Secret token to validate hook messages at the receiver. - - If this is present it will always result in a change as it cannot be retrieved from GitLab. - - Will show up in the X-GitLab-Token HTTP request header. + - If this is present it always results in a change as it cannot be retrieved from GitLab. + - It shows up in the C(X-GitLab-Token) HTTP request header. 
required: false type: str -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: "Adding a project hook" community.general.gitlab_hook: api_url: https://gitlab.example.com/ @@ -116,7 +124,6 @@ EXAMPLES = ''' state: present push_events: true tag_push_events: true - hook_validate_certs: false token: "my-super-secret-token-that-my-ci-server-will-check" - name: "Delete the previous hook" @@ -134,37 +141,37 @@ EXAMPLES = ''' project: 10 hook_url: "https://my-ci-server.example.com/gitlab-hook" state: absent -''' +""" -RETURN = ''' +RETURN = r""" msg: - description: Success or failure message + description: Success or failure message. returned: always type: str sample: "Success" result: - description: json parsed response from the server + description: JSON parsed response from the server. returned: always type: dict error: - description: the error message returned by the GitLab API + description: The error message returned by the GitLab API. returned: failed type: str sample: "400: path is already in use" hook: - description: API object + description: API object. 
returned: always type: dict -''' +""" from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, find_project, gitlab_authentication, ensure_gitlab_package + auth_argument_spec, find_project, gitlab_authentication, list_all_kwargs ) @@ -196,6 +203,7 @@ class GitLabHook(object): 'job_events': options['job_events'], 'pipeline_events': options['pipeline_events'], 'wiki_page_events': options['wiki_page_events'], + 'releases_events': options['releases_events'], 'enable_ssl_verification': options['enable_ssl_verification'], 'token': options['token'], }) @@ -211,6 +219,7 @@ class GitLabHook(object): 'job_events': options['job_events'], 'pipeline_events': options['pipeline_events'], 'wiki_page_events': options['wiki_page_events'], + 'releases_events': options['releases_events'], 'enable_ssl_verification': options['enable_ssl_verification'], 'token': options['token'], }) @@ -259,9 +268,8 @@ class GitLabHook(object): @param hook_url Url to call on event ''' def find_hook(self, project, hook_url): - hooks = project.hooks.list(all=True) - for hook in hooks: - if (hook.url == hook_url): + for hook in project.hooks.list(**list_all_kwargs): + if hook.url == hook_url: return hook ''' @@ -297,6 +305,7 @@ def main(): job_events=dict(type='bool', default=False), pipeline_events=dict(type='bool', default=False), wiki_page_events=dict(type='bool', default=False), + releases_events=dict(type='bool'), hook_validate_certs=dict(type='bool', default=False, aliases=['enable_ssl_verification']), token=dict(type='str', no_log=True), )) @@ -318,7 +327,9 @@ def main(): ], supports_check_mode=True, ) - ensure_gitlab_package(module) + + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) state = module.params['state'] project_identifier = module.params['project'] @@ -332,11 +343,10 @@ def 
main(): job_events = module.params['job_events'] pipeline_events = module.params['pipeline_events'] wiki_page_events = module.params['wiki_page_events'] + releases_events = module.params['releases_events'] enable_ssl_verification = module.params['hook_validate_certs'] hook_token = module.params['token'] - gitlab_instance = gitlab_authentication(module) - gitlab_hook = GitLabHook(module, gitlab_instance) project = find_project(gitlab_instance, project_identifier) @@ -364,6 +374,7 @@ def main(): "job_events": job_events, "pipeline_events": pipeline_events, "wiki_page_events": wiki_page_events, + "releases_events": releases_events, "enable_ssl_verification": enable_ssl_verification, "token": hook_token, }): diff --git a/plugins/modules/gitlab_instance_variable.py b/plugins/modules/gitlab_instance_variable.py new file mode 100644 index 0000000000..c7075f7454 --- /dev/null +++ b/plugins/modules/gitlab_instance_variable.py @@ -0,0 +1,376 @@ +#!/usr/bin/python + +# Copyright (c) 2023, Benedikt Braunger (bebr@adm.ku.dk) +# Based on code: +# Copyright (c) 2020, Florent Madiot (scodeman@scode.io) +# Copyright (c) 2019, Markus Bergholz (markuman@gmail.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: gitlab_instance_variable +short_description: Creates, updates, or deletes GitLab instance variables +version_added: 7.1.0 +description: + - Creates a instance variable if it does not exist. + - When a instance variable does exist, its value is updated if the values are different. + - Support for instance variables requires GitLab >= 13.0. + - Variables which are not mentioned in the modules options, but are present on the GitLab instance, either stay (O(purge=false)) + or are deleted (O(purge=true)). 
+author: + - Benedikt Braunger (@benibr) +requirements: + - python-gitlab python module +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + state: + description: + - Create or delete instance variable. + default: present + type: str + choices: ["present", "absent"] + purge: + description: + - When set to V(true), delete all variables which are not mentioned in the task. + default: false + type: bool + variables: + description: + - A list of dictionaries that represents CI/CD variables. + default: [] + type: list + elements: dict + suboptions: + name: + description: + - The name of the variable. + type: str + required: true + value: + description: + - The variable value. + - Required when O(state=present). + type: str + description: + description: + - A description for the variable. + - Support for descriptions requires GitLab >= 16.8. + type: str + version_added: '11.4.0' + masked: + description: + - Whether variable value is masked or not. + type: bool + default: false + protected: + description: + - Whether variable value is protected or not. + type: bool + default: false + raw: + description: + - Whether variable value is raw or not. + - Support for raw values requires GitLab >= 15.7. + type: bool + default: false + version_added: 10.2.0 + variable_type: + description: + - Whether a variable is an environment variable (V(env_var)) or a file (V(file)). 
+ type: str + choices: ["env_var", "file"] + default: env_var +""" + + +EXAMPLES = r""" +- name: Set or update some CI/CD variables + community.general.gitlab_instance_variable: + api_url: https://gitlab.com + api_token: secret_access_token + purge: false + variables: + - name: ACCESS_KEY_ID + value: abc1312cba + - name: SECRET_ACCESS_KEY + value: 1337 + masked: true + protected: true + variable_type: env_var + +- name: Delete one variable + community.general.gitlab_instance_variable: + api_url: https://gitlab.com + api_token: secret_access_token + state: absent + variables: + - name: ACCESS_KEY_ID +""" + +RETURN = r""" +instance_variable: + description: Four lists of the variablenames which were added, updated, removed or exist. + returned: always + type: dict + contains: + added: + description: A list of variables which were created. + returned: always + type: list + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] + untouched: + description: A list of variables which exist. + returned: always + type: list + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] + removed: + description: A list of variables which were deleted. + returned: always + type: list + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] + updated: + description: A list pre-existing variables whose values have been set. 
+ returned: always + type: list + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.api import basic_auth_argument_spec +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, filter_returned_variables, + list_all_kwargs +) + + +class GitlabInstanceVariables(object): + + def __init__(self, module, gitlab_instance): + self.instance = gitlab_instance + self._module = module + + def list_all_instance_variables(self): + return list(self.instance.variables.list(**list_all_kwargs)) + + def create_variable(self, var_obj): + if self._module.check_mode: + return True + var = { + "key": var_obj.get('key'), + "value": var_obj.get('value'), + "description": var_obj.get('description'), + "masked": var_obj.get('masked'), + "protected": var_obj.get('protected'), + "raw": var_obj.get('raw'), + "variable_type": var_obj.get('variable_type'), + } + + self.instance.variables.create(var) + return True + + def update_variable(self, var_obj): + if self._module.check_mode: + return True + self.delete_variable(var_obj) + self.create_variable(var_obj) + return True + + def delete_variable(self, var_obj): + if self._module.check_mode: + return True + self.instance.variables.delete(var_obj.get('key')) + return True + + +def compare(requested_variables, existing_variables, state): + # we need to do this, because it was determined in a previous version - more or less buggy + # basically it is not necessary and might results in more/other bugs! + # but it is required and only relevant for check mode!! + # logic represents state 'present' when not purge. 
all other can be derived from that + # untouched => equal in both + # updated => name and scope are equal + # added => name and scope does not exist + untouched = list() + updated = list() + added = list() + + if state == 'present': + existing_key_scope_vars = list() + for item in existing_variables: + existing_key_scope_vars.append({'key': item.get('key')}) + + for var in requested_variables: + if var in existing_variables: + untouched.append(var) + else: + compare_item = {'key': var.get('name')} + if compare_item in existing_key_scope_vars: + updated.append(var) + else: + added.append(var) + + return untouched, updated, added + + +def native_python_main(this_gitlab, purge, requested_variables, state, module): + + change = False + return_value = dict(added=list(), updated=list(), removed=list(), untouched=list()) + + gitlab_keys = this_gitlab.list_all_instance_variables() + before = [x.attributes for x in gitlab_keys] + + existing_variables = filter_returned_variables(gitlab_keys) + + for item in requested_variables: + item['key'] = item.pop('name') + item['value'] = str(item.get('value')) + if item.get('protected') is None: + item['protected'] = False + if item.get('masked') is None: + item['masked'] = False + if item.get('raw') is None: + item['raw'] = False + if item.get('variable_type') is None: + item['variable_type'] = 'env_var' + + if module.check_mode: + untouched, updated, added = compare(requested_variables, existing_variables, state) + + if state == 'present': + add_or_update = [x for x in requested_variables if x not in existing_variables] + for item in add_or_update: + try: + if this_gitlab.create_variable(item): + return_value['added'].append(item) + + except Exception: + if this_gitlab.update_variable(item): + return_value['updated'].append(item) + + if purge: + # refetch and filter + gitlab_keys = this_gitlab.list_all_instance_variables() + existing_variables = filter_returned_variables(gitlab_keys) + + remove = [x for x in existing_variables if x 
not in requested_variables] + for item in remove: + if this_gitlab.delete_variable(item): + return_value['removed'].append(item) + + elif state == 'absent': + # value, type, and description do not matter on removing variables. + keys_ignored_on_deletion = ['value', 'variable_type', 'description'] + for key in keys_ignored_on_deletion: + for item in existing_variables: + item.pop(key) + for item in requested_variables: + item.pop(key) + + if not purge: + remove_requested = [x for x in requested_variables if x in existing_variables] + for item in remove_requested: + if this_gitlab.delete_variable(item): + return_value['removed'].append(item) + + else: + for item in existing_variables: + if this_gitlab.delete_variable(item): + return_value['removed'].append(item) + + if module.check_mode: + return_value = dict(added=added, updated=updated, removed=return_value['removed'], untouched=untouched) + + if len(return_value['added'] + return_value['removed'] + return_value['updated']) > 0: + change = True + + gitlab_keys = this_gitlab.list_all_instance_variables() + after = [x.attributes for x in gitlab_keys] + + return change, return_value, before, after + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update( + purge=dict(type='bool', default=False), + variables=dict(type='list', elements='dict', default=list(), options=dict( + name=dict(type='str', required=True), + value=dict(type='str', no_log=True), + description=dict(type='str'), + masked=dict(type='bool', default=False), + protected=dict(type='bool', default=False), + raw=dict(type='bool', default=False), + variable_type=dict(type='str', default='env_var', choices=["env_var", "file"]) + )), + state=dict(type='str', default="present", choices=["absent", "present"]), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 
'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ], + supports_check_mode=True + ) + + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) + + purge = module.params['purge'] + state = module.params['state'] + + variables = module.params['variables'] + + if state == 'present': + if any(x['value'] is None for x in variables): + module.fail_json(msg='value parameter is required in state present') + + this_gitlab = GitlabInstanceVariables(module=module, gitlab_instance=gitlab_instance) + + changed, raw_return_value, before, after = native_python_main(this_gitlab, purge, variables, state, module) + + # postprocessing + for item in after: + item['name'] = item.pop('key') + for item in before: + item['name'] = item.pop('key') + + untouched_key_name = 'key' + if not module.check_mode: + untouched_key_name = 'name' + raw_return_value['untouched'] = [x for x in before if x in after] + + added = [x.get('key') for x in raw_return_value['added']] + updated = [x.get('key') for x in raw_return_value['updated']] + removed = [x.get('key') for x in raw_return_value['removed']] + untouched = [x.get(untouched_key_name) for x in raw_return_value['untouched']] + return_value = dict(added=added, updated=updated, removed=removed, untouched=untouched) + + module.exit_json(changed=changed, instance_variable=return_value) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gitlab_issue.py b/plugins/modules/gitlab_issue.py new file mode 100644 index 0000000000..aab9f2a346 --- /dev/null +++ b/plugins/modules/gitlab_issue.py @@ -0,0 +1,400 @@ +#!/usr/bin/python + +# Copyright (c) 2023, Ondrej Zvara (ozvara1@gmail.com) +# Based on code: +# Copyright (c) 2021, Lennert Mertens (lennert@nubera.be) +# Copyright (c) 2021, Werner Dijkerman 
(ikben@werner-dijkerman.nl) +# Copyright (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) +# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: gitlab_issue +short_description: Create, update, or delete GitLab issues +version_added: '8.1.0' +description: + - Creates an issue if it does not exist. + - When an issue does exist, it is updated if the provided parameters are different. + - When an issue does exist and O(state=absent), the issue is deleted. + - When multiple issues are detected, the task fails. + - Existing issues are matched based on O(title) and O(state_filter) filters. +author: + - zvaraondrej (@zvaraondrej) +requirements: + - python-gitlab >= 2.3.0 +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + assignee_ids: + description: + - A list of assignee usernames omitting V(@) character. + - Set to an empty array to unassign all assignees. + type: list + elements: str + description: + description: + - A description of the issue. + - Gets overridden by a content of file specified at O(description_path), if found. + type: str + description_path: + description: + - A path of file containing issue's description. + - Accepts MarkDown formatted files. + type: path + issue_type: + description: + - Type of the issue. + default: issue + type: str + choices: ["issue", "incident", "test_case"] + labels: + description: + - A list of label names. + - Set to an empty array to remove all labels. + type: list + elements: str + milestone_search: + description: + - The name of the milestone. + - Set to empty string to unassign milestone. 
+ type: str + milestone_group_id: + description: + - The path or numeric ID of the group hosting desired milestone. + type: str + project: + description: + - The path or name of the project. + required: true + type: str + state: + description: + - Create or delete issue. + default: present + type: str + choices: ["present", "absent"] + state_filter: + description: + - Filter specifying state of issues while searching. + type: str + choices: ["opened", "closed"] + default: opened + title: + description: + - A title for the issue. The title is used as a unique identifier to ensure idempotency. + type: str + required: true +""" + + +EXAMPLES = r""" +- name: Create Issue + community.general.gitlab_issue: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + title: "Ansible demo Issue" + description: "Demo Issue description" + labels: + - Ansible + - Demo + assignee_ids: + - testassignee + state_filter: "opened" + state: present + +- name: Delete Issue + community.general.gitlab_issue: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + title: "Ansible demo Issue" + state_filter: "opened" + state: absent +""" + +RETURN = r""" +msg: + description: Success or failure message. + returned: always + type: str + sample: "Success" + +issue: + description: API object. 
+ returned: success + type: dict +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.common.text.converters import to_native, to_text + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, gitlab, find_project, find_group +) + + +class GitlabIssue(object): + + def __init__(self, module, project, gitlab_instance): + self._gitlab = gitlab_instance + self._module = module + self.project = project + + ''' + @param milestone_id Title of the milestone + ''' + def get_milestone(self, milestone_id, group): + milestones = [] + try: + milestones = group.milestones.list(search=milestone_id) + except gitlab.exceptions.GitlabGetError as e: + self._module.fail_json(msg="Failed to list the Milestones: %s" % to_native(e)) + + if len(milestones) > 1: + self._module.fail_json(msg="Multiple Milestones matched search criteria.") + if len(milestones) < 1: + self._module.fail_json(msg="No Milestones matched search criteria.") + if len(milestones) == 1: + try: + return group.milestones.get(id=milestones[0].id) + except gitlab.exceptions.GitlabGetError as e: + self._module.fail_json(msg="Failed to get the Milestones: %s" % to_native(e)) + + ''' + @param title Title of the Issue + @param state_filter Issue's state to filter on + ''' + def get_issue(self, title, state_filter): + issues = [] + try: + issues = self.project.issues.list(query_parameters={"search": title, "in": "title", "state": state_filter}) + except gitlab.exceptions.GitlabGetError as e: + self._module.fail_json(msg="Failed to list the Issues: %s" % to_native(e)) + + if len(issues) > 1: + self._module.fail_json(msg="Multiple Issues matched search criteria.") + if len(issues) == 1: + try: + return self.project.issues.get(id=issues[0].iid) + except gitlab.exceptions.GitlabGetError as e: + self._module.fail_json(msg="Failed to get the Issue: %s" % 
to_native(e)) + + ''' + @param username Name of the user + ''' + def get_user(self, username): + users = [] + try: + users = [user for user in self.project.users.list(username=username, all=True) if user.username == username] + except gitlab.exceptions.GitlabGetError as e: + self._module.fail_json(msg="Failed to list the users: %s" % to_native(e)) + + if len(users) > 1: + self._module.fail_json(msg="Multiple Users matched search criteria.") + elif len(users) < 1: + self._module.fail_json(msg="No User matched search criteria.") + else: + return users[0] + + ''' + @param users List of usernames + ''' + def get_user_ids(self, users): + return [self.get_user(user).id for user in users] + + ''' + @param options Options of the Issue + ''' + def create_issue(self, options): + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully created Issue '%s'." % options["title"]) + + try: + return self.project.issues.create(options) + except gitlab.exceptions.GitlabCreateError as e: + self._module.fail_json(msg="Failed to create Issue: %s " % to_native(e)) + + ''' + @param issue Issue object to delete + ''' + def delete_issue(self, issue): + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully deleted Issue '%s'." % issue["title"]) + + try: + return issue.delete() + except gitlab.exceptions.GitlabDeleteError as e: + self._module.fail_json(msg="Failed to delete Issue: '%s'." % to_native(e)) + + ''' + @param issue Issue object to update + @param options Options of the Issue + ''' + def update_issue(self, issue, options): + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully updated Issue '%s'." % issue["title"]) + + try: + return self.project.issues.update(issue.iid, options) + except gitlab.exceptions.GitlabUpdateError as e: + self._module.fail_json(msg="Failed to update Issue %s." 
% to_native(e)) + + ''' + @param issue Issue object to evaluate + @param options New options to update Issue with + ''' + def issue_has_changed(self, issue, options): + for key, value in options.items(): + if value is not None: + + if key == 'milestone_id': + old_milestone = getattr(issue, 'milestone')['id'] if getattr(issue, 'milestone') else "" + if value != old_milestone: + return True + elif key == 'assignee_ids': + if value != sorted([user["id"] for user in getattr(issue, 'assignees')]): + return True + + elif key == 'labels': + if value != sorted(getattr(issue, key)): + return True + + elif getattr(issue, key) != value: + return True + + return False + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update( + assignee_ids=dict(type='list', elements='str'), + description=dict(type='str'), + description_path=dict(type='path'), + issue_type=dict(type='str', default='issue', choices=["issue", "incident", "test_case"]), + labels=dict(type='list', elements='str'), + milestone_search=dict(type='str'), + milestone_group_id=dict(type='str'), + project=dict(type='str', required=True), + state=dict(type='str', default="present", choices=["absent", "present"]), + state_filter=dict(type='str', default="opened", choices=["opened", "closed"]), + title=dict(type='str', required=True), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ['description', 'description_path'], + ], + required_together=[ + ['api_username', 'api_password'], + ['milestone_search', 'milestone_group_id'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ], + supports_check_mode=True + ) + + assignee_ids = module.params['assignee_ids'] + description = 
module.params['description'] + description_path = module.params['description_path'] + issue_type = module.params['issue_type'] + labels = module.params['labels'] + milestone_id = module.params['milestone_search'] + milestone_group_id = module.params['milestone_group_id'] + project = module.params['project'] + state = module.params['state'] + state_filter = module.params['state_filter'] + title = module.params['title'] + + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module, min_version='2.3.0') + + this_project = find_project(gitlab_instance, project) + if this_project is None: + module.fail_json(msg="Failed to get the project: %s" % project) + + this_gitlab = GitlabIssue(module=module, project=this_project, gitlab_instance=gitlab_instance) + + if milestone_id and milestone_group_id: + this_group = find_group(gitlab_instance, milestone_group_id) + if this_group is None: + module.fail_json(msg="Failed to get the group: %s" % milestone_group_id) + + milestone_id = this_gitlab.get_milestone(milestone_id, this_group).id + + this_issue = this_gitlab.get_issue(title, state_filter) + + if state == "present": + if description_path: + try: + with open(description_path, 'rb') as f: + description = to_text(f.read(), errors='surrogate_or_strict') + except IOError as e: + module.fail_json(msg='Cannot open {0}: {1}'.format(description_path, e)) + + # sorting necessary in order to properly detect changes, as we don't want to get false positive + # results due to differences in ids ordering; + assignee_ids = sorted(this_gitlab.get_user_ids(assignee_ids)) if assignee_ids else assignee_ids + labels = sorted(labels) if labels else labels + + options = { + "title": title, + "description": description, + "labels": labels, + "issue_type": issue_type, + "milestone_id": milestone_id, + "assignee_ids": assignee_ids, + } + + if not this_issue: + issue = this_gitlab.create_issue(options) + module.exit_json( + changed=True, msg="Created Issue 
'{t}'.".format(t=title), + issue=issue.asdict() + ) + else: + if this_gitlab.issue_has_changed(this_issue, options): + issue = this_gitlab.update_issue(this_issue, options) + module.exit_json( + changed=True, msg="Updated Issue '{t}'.".format(t=title), + issue=issue + ) + else: + module.exit_json( + changed=False, msg="Issue '{t}' already exists".format(t=title), + issue=this_issue.asdict() + ) + elif state == "absent": + if not this_issue: + module.exit_json(changed=False, msg="Issue '{t}' does not exist or has already been deleted.".format(t=title)) + else: + issue = this_gitlab.delete_issue(this_issue) + module.exit_json( + changed=True, msg="Issue '{t}' deleted.".format(t=title), + issue=issue + ) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gitlab_label.py b/plugins/modules/gitlab_label.py new file mode 100644 index 0000000000..5b6d80e20c --- /dev/null +++ b/plugins/modules/gitlab_label.py @@ -0,0 +1,492 @@ +#!/usr/bin/python + +# Copyright (c) 2023, Gabriele Pongelli (gabriele.pongelli@gmail.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: gitlab_label +short_description: Creates/updates/deletes GitLab Labels belonging to project or group +version_added: 8.3.0 +description: + - When a label does not exist, it is created. + - When a label does exist, its value is updated when the values are different. + - Labels can be purged. +author: + - "Gabriele Pongelli (@gpongelli)" +requirements: + - python-gitlab python module +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + state: + description: + - Create or delete project or group label. 
+ default: present + type: str + choices: ["present", "absent"] + purge: + description: + - When set to V(true), delete all labels which are not mentioned in the task. + default: false + type: bool + required: false + project: + description: + - The path and name of the project. Either this or O(group) is required. + required: false + type: str + group: + description: + - The path of the group. Either this or O(project) is required. + required: false + type: str + labels: + description: + - A list of dictionaries that represents gitlab project's or group's labels. + type: list + elements: dict + required: false + default: [] + suboptions: + name: + description: + - The name of the label. + type: str + required: true + color: + description: + - The color of the label. + - Required when O(state=present). + type: str + priority: + description: + - Integer value to give priority to the label. + type: int + required: false + default: + description: + description: + - Label's description. + type: str + default: + new_name: + description: + - Optional field to change label's name. 
+ type: str + default: +""" + + +EXAMPLES = r""" +# same project's task can be executed for group +- name: Create one Label + community.general.gitlab_label: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + labels: + - name: label_one + color: "#123456" + state: present + +- name: Create many group labels + community.general.gitlab_label: + api_url: https://gitlab.com + api_token: secret_access_token + group: "group1" + labels: + - name: label_one + color: "#123456" + description: this is a label + priority: 20 + - name: label_two + color: "#554422" + state: present + +- name: Create many project labels + community.general.gitlab_label: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + labels: + - name: label_one + color: "#123456" + description: this is a label + priority: 20 + - name: label_two + color: "#554422" + state: present + +- name: Set or update some labels + community.general.gitlab_label: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + labels: + - name: label_one + color: "#224488" + state: present + +- name: Add label in check mode + community.general.gitlab_label: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + labels: + - name: label_one + color: "#224488" + check_mode: true + +- name: Delete Label + community.general.gitlab_label: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + labels: + - name: label_one + state: absent + +- name: Change Label name + community.general.gitlab_label: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + labels: + - name: label_one + new_name: label_two + state: absent + +- name: Purge all labels + community.general.gitlab_label: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + purge: true + +- name: Delete 
many labels + community.general.gitlab_label: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + state: absent + labels: + - name: label-abc123 + - name: label-two +""" + +RETURN = r""" +labels: + description: Four lists of the labels which were added, updated, removed or exist. + returned: success + type: dict + contains: + added: + description: A list of labels which were created. + returned: always + type: list + sample: ["abcd", "label-one"] + untouched: + description: A list of labels which exist. + returned: always + type: list + sample: ["defg", "new-label"] + removed: + description: A list of labels which were deleted. + returned: always + type: list + sample: ["defg", "new-label"] + updated: + description: A list pre-existing labels whose values have been set. + returned: always + type: list + sample: ["defg", "new-label"] +labels_obj: + description: API object. + returned: success + type: dict +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.api import basic_auth_argument_spec + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, ensure_gitlab_package, find_group, find_project +) + + +class GitlabLabels(object): + + def __init__(self, module, gitlab_instance, group_id, project_id): + self._gitlab = gitlab_instance + self.gitlab_object = group_id if group_id else project_id + self.is_group_label = True if group_id else False + self._module = module + + def list_all_labels(self): + page_nb = 1 + labels = [] + vars_page = self.gitlab_object.labels.list(page=page_nb) + while len(vars_page) > 0: + labels += vars_page + page_nb += 1 + vars_page = self.gitlab_object.labels.list(page=page_nb) + return labels + + def create_label(self, var_obj): + if self._module.check_mode: + return True, True + + var = { + "name": var_obj.get('name'), + "color": var_obj.get('color'), + } + + if var_obj.get('description') is 
not None: + var["description"] = var_obj.get('description') + + if var_obj.get('priority') is not None: + var["priority"] = var_obj.get('priority') + + _obj = self.gitlab_object.labels.create(var) + return True, _obj.asdict() + + def update_label(self, var_obj): + if self._module.check_mode: + return True, True + _label = self.gitlab_object.labels.get(var_obj.get('name')) + + if var_obj.get('new_name') is not None: + _label.new_name = var_obj.get('new_name') + + if var_obj.get('description') is not None: + _label.description = var_obj.get('description') + if var_obj.get('priority') is not None: + _label.priority = var_obj.get('priority') + if var_obj.get('color') is not None: + _label.color = var_obj.get('color') + + # save returns None + _label.save() + return True, _label.asdict() + + def delete_label(self, var_obj): + if self._module.check_mode: + return True, True + _label = self.gitlab_object.labels.get(var_obj.get('name')) + # delete returns None + _label.delete() + return True, _label.asdict() + + +def compare(requested_labels, existing_labels, state): + # we need to do this, because it was determined in a previous version - more or less buggy + # basically it is not necessary and might result in more/other bugs! + # but it is required and only relevant for check mode!! + # logic represents state 'present' when not purge. 
all other can be derived from that + # untouched => equal in both + # updated => name and scope are equal + # added => name and scope does not exist + untouched = list() + updated = list() + added = list() + + if state == 'present': + _existing_labels = list() + for item in existing_labels: + _existing_labels.append({'name': item.get('name')}) + + for var in requested_labels: + if var in existing_labels: + untouched.append(var) + else: + compare_item = {'name': var.get('name')} + if compare_item in _existing_labels: + updated.append(var) + else: + added.append(var) + + return untouched, updated, added + + +def native_python_main(this_gitlab, purge, requested_labels, state, module): + change = False + return_value = dict(added=[], updated=[], removed=[], untouched=[]) + return_obj = dict(added=[], updated=[], removed=[]) + + labels_before = [x.asdict() for x in this_gitlab.list_all_labels()] + + # filter out and enrich before compare + for item in requested_labels: + # add defaults when not present + if item.get('description') is None: + item['description'] = "" + if item.get('new_name') is None: + item['new_name'] = None + if item.get('priority') is None: + item['priority'] = None + + # group label does not have priority, removing for comparison + if this_gitlab.is_group_label: + item.pop('priority') + + for item in labels_before: + # remove field only from server + item.pop('id') + item.pop('description_html') + item.pop('text_color') + item.pop('subscribed') + # field present only when it is a project's label + if 'is_project_label' in item: + item.pop('is_project_label') + item['new_name'] = None + + if state == 'present': + add_or_update = [x for x in requested_labels if x not in labels_before] + for item in add_or_update: + try: + _rv, _obj = this_gitlab.create_label(item) + if _rv: + return_value['added'].append(item) + return_obj['added'].append(_obj) + except Exception: + # create raises exception with following error message when label already exists + 
_rv, _obj = this_gitlab.update_label(item) + if _rv: + return_value['updated'].append(item) + return_obj['updated'].append(_obj) + + if purge: + # re-fetch + _labels = this_gitlab.list_all_labels() + + for item in labels_before: + _rv, _obj = this_gitlab.delete_label(item) + if _rv: + return_value['removed'].append(item) + return_obj['removed'].append(_obj) + + elif state == 'absent': + if not purge: + _label_names_requested = [x['name'] for x in requested_labels] + remove_requested = [x for x in labels_before if x['name'] in _label_names_requested] + for item in remove_requested: + _rv, _obj = this_gitlab.delete_label(item) + if _rv: + return_value['removed'].append(item) + return_obj['removed'].append(_obj) + else: + for item in labels_before: + _rv, _obj = this_gitlab.delete_label(item) + if _rv: + return_value['removed'].append(item) + return_obj['removed'].append(_obj) + + if module.check_mode: + _untouched, _updated, _added = compare(requested_labels, labels_before, state) + return_value = dict(added=_added, updated=_updated, removed=return_value['removed'], untouched=_untouched) + + if any(return_value[x] for x in ['added', 'removed', 'updated']): + change = True + + labels_after = [x.asdict() for x in this_gitlab.list_all_labels()] + + return change, return_value, labels_before, labels_after, return_obj + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update( + project=dict(type='str'), + group=dict(type='str'), + purge=dict(type='bool', default=False), + labels=dict(type='list', elements='dict', default=list(), + options=dict( + name=dict(type='str', required=True), + color=dict(type='str'), + description=dict(type='str'), + priority=dict(type='int'), + new_name=dict(type='str')) + ), + state=dict(type='str', default="present", choices=["absent", "present"]), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + 
['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ['project', 'group'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'], + ['project', 'group'] + ], + supports_check_mode=True + ) + ensure_gitlab_package(module) + + gitlab_project = module.params['project'] + gitlab_group = module.params['group'] + purge = module.params['purge'] + label_list = module.params['labels'] + state = module.params['state'] + + gitlab_instance = gitlab_authentication(module, min_version='3.2.0') + + # find_project can return None, but the other must exist + gitlab_project_id = find_project(gitlab_instance, gitlab_project) + + # find_group can return None, but the other must exist + gitlab_group_id = find_group(gitlab_instance, gitlab_group) + + # if both not found, module must exist + if not gitlab_project_id and not gitlab_group_id: + if gitlab_project and not gitlab_project_id: + module.fail_json(msg="project '%s' not found." % gitlab_project) + if gitlab_group and not gitlab_group_id: + module.fail_json(msg="group '%s' not found." 
% gitlab_group) + + this_gitlab = GitlabLabels(module=module, gitlab_instance=gitlab_instance, group_id=gitlab_group_id, + project_id=gitlab_project_id) + + if state == 'present': + _existing_labels = [x.asdict()['name'] for x in this_gitlab.list_all_labels()] + + # color is mandatory when creating label, but it is optional when changing name or updating other fields + if any(x['color'] is None and x['new_name'] is None and x['name'] not in _existing_labels for x in label_list): + module.fail_json(msg='color parameter is required for new labels') + + change, raw_return_value, before, after, _obj = native_python_main(this_gitlab, purge, label_list, state, module) + + if not module.check_mode: + raw_return_value['untouched'] = [x for x in before if x in after] + + added = [x.get('name') for x in raw_return_value['added']] + updated = [x.get('name') for x in raw_return_value['updated']] + removed = [x.get('name') for x in raw_return_value['removed']] + untouched = [x.get('name') for x in raw_return_value['untouched']] + return_value = dict(added=added, updated=updated, removed=removed, untouched=untouched) + + module.exit_json(changed=change, labels=return_value, labels_obj=_obj) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gitlab_merge_request.py b/plugins/modules/gitlab_merge_request.py new file mode 100644 index 0000000000..83000a8ac1 --- /dev/null +++ b/plugins/modules/gitlab_merge_request.py @@ -0,0 +1,413 @@ +#!/usr/bin/python + +# Copyright (c) 2023, Ondrej Zvara (ozvara1@gmail.com) +# Based on code: +# Copyright (c) 2021, Lennert Mertens (lennert@nubera.be) +# Copyright (c) 2021, Werner Dijkerman (ikben@werner-dijkerman.nl) +# Copyright (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) +# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import 
annotations + +DOCUMENTATION = r""" +module: gitlab_merge_request +short_description: Create, update, or delete GitLab merge requests +version_added: 7.1.0 +description: + - Creates a merge request if it does not exist. + - When a single merge request does exist, it is updated if the provided parameters are different. + - When a single merge request does exist and O(state=absent), the merge request is deleted. + - When multiple merge requests are detected, the task fails. + - Existing merge requests are matched based on O(title), O(source_branch), O(target_branch), and O(state_filter) filters. +author: + - zvaraondrej (@zvaraondrej) +requirements: + - python-gitlab >= 2.3.0 +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + state: + description: + - Create or delete merge request. + default: present + type: str + choices: ["present", "absent"] + project: + description: + - The path or name of the project. + required: true + type: str + source_branch: + description: + - Merge request's source branch. + - Ignored while updating existing merge request. + required: true + type: str + target_branch: + description: + - Merge request's target branch. + required: true + type: str + title: + description: + - A title for the merge request. + type: str + required: true + description: + description: + - A description for the merge request. + - Gets overridden by a content of file specified at O(description_path), if found. + type: str + description_path: + description: + - A path of file containing merge request's description. + - Accepts MarkDown formatted files. + type: path + labels: + description: + - Comma separated list of label names. + type: str + default: "" + remove_source_branch: + description: + - Flag indicating if a merge request should remove the source branch when merging. 
+ type: bool + default: false + state_filter: + description: + - Filter specifying state of merge requests while searching. + type: str + choices: ["opened", "closed", "locked", "merged"] + default: opened + assignee_ids: + description: + - Comma separated list of assignees usernames omitting V(@) character. + - Set to empty string to unassign all assignees. + type: str + reviewer_ids: + description: + - Comma separated list of reviewers usernames omitting V(@) character. + - Set to empty string to unassign all reviewers. + type: str +""" + + +EXAMPLES = r""" +- name: Create Merge Request from branch1 to branch2 + community.general.gitlab_merge_request: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + source_branch: branch1 + target_branch: branch2 + title: "Ansible demo MR" + description: "Demo MR description" + labels: "Ansible,Demo" + state_filter: "opened" + remove_source_branch: true + state: present + +- name: Delete Merge Request from branch1 to branch2 + community.general.gitlab_merge_request: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + source_branch: branch1 + target_branch: branch2 + title: "Ansible demo MR" + state_filter: "opened" + state: absent +""" + +RETURN = r""" +msg: + description: Success or failure message. + returned: always + type: str + sample: "Success" + +mr: + description: API object. 
+ returned: success + type: dict +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.common.text.converters import to_native, to_text + +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, gitlab, find_project +) + + +class GitlabMergeRequest(object): + + def __init__(self, module, project, gitlab_instance): + self._gitlab = gitlab_instance + self._module = module + self.project = project + + ''' + @param branch Name of the branch + ''' + def get_branch(self, branch): + try: + return self.project.branches.get(branch) + except gitlab.exceptions.GitlabGetError as e: + self._module.fail_json(msg="Failed to get the branch: %s" % to_native(e)) + + ''' + @param title Title of the Merge Request + @param source_branch Merge Request's source branch + @param target_branch Merge Request's target branch + @param state_filter Merge Request's state to filter on + ''' + def get_mr(self, title, source_branch, target_branch, state_filter): + mrs = [] + try: + mrs = self.project.mergerequests.list(search=title, source_branch=source_branch, target_branch=target_branch, state=state_filter) + except gitlab.exceptions.GitlabGetError as e: + self._module.fail_json(msg="Failed to list the Merge Request: %s" % to_native(e)) + + if len(mrs) > 1: + self._module.fail_json(msg="Multiple Merge Requests matched search criteria.") + if len(mrs) == 1: + try: + return self.project.mergerequests.get(id=mrs[0].iid) + except gitlab.exceptions.GitlabGetError as e: + self._module.fail_json(msg="Failed to get the Merge Request: %s" % to_native(e)) + + ''' + @param username Name of the user + ''' + def get_user(self, username): + users = [] + try: + users = [user for user in self.project.users.list(username=username, all=True) if user.username 
== username] + except gitlab.exceptions.GitlabGetError as e: + self._module.fail_json(msg="Failed to list the users: %s" % to_native(e)) + + if len(users) > 1: + self._module.fail_json(msg="Multiple Users matched search criteria.") + elif len(users) < 1: + self._module.fail_json(msg="No User matched search criteria.") + else: + return users[0] + + ''' + @param users List of usernames + ''' + def get_user_ids(self, users): + return [self.get_user(user).id for user in users] + + ''' + @param options Options of the Merge Request + ''' + def create_mr(self, options): + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully created the Merge Request %s" % options["title"]) + + try: + return self.project.mergerequests.create(options) + except gitlab.exceptions.GitlabCreateError as e: + self._module.fail_json(msg="Failed to create Merge Request: %s " % to_native(e)) + + ''' + @param mr Merge Request object to delete + ''' + def delete_mr(self, mr): + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully deleted the Merge Request %s" % mr["title"]) + + try: + return mr.delete() + except gitlab.exceptions.GitlabDeleteError as e: + self._module.fail_json(msg="Failed to delete Merge Request: %s " % to_native(e)) + + ''' + @param mr Merge Request object to update + ''' + def update_mr(self, mr, options): + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully updated the Merge Request %s" % mr["title"]) + + try: + return self.project.mergerequests.update(mr.iid, options) + except gitlab.exceptions.GitlabUpdateError as e: + self._module.fail_json(msg="Failed to update Merge Request: %s " % to_native(e)) + + ''' + @param mr Merge Request object to evaluate + @param options New options to update MR with + ''' + def mr_has_changed(self, mr, options): + for key, value in options.items(): + if value is not None: + # see https://gitlab.com/gitlab-org/gitlab-foss/-/issues/27355 + if key == 
'remove_source_branch': + key = 'force_remove_source_branch' + + if key == 'assignee_ids': + if value != sorted([user["id"] for user in getattr(mr, 'assignees')]): + return True + + elif key == 'reviewer_ids': + if value != sorted([user["id"] for user in getattr(mr, 'reviewers')]): + return True + + elif key == 'labels': + if value != sorted(getattr(mr, key)): + return True + + elif getattr(mr, key) != value: + return True + + return False + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update( + project=dict(type='str', required=True), + source_branch=dict(type='str', required=True), + target_branch=dict(type='str', required=True), + title=dict(type='str', required=True), + description=dict(type='str'), + labels=dict(type='str', default=""), + description_path=dict(type='path'), + remove_source_branch=dict(type='bool', default=False), + state_filter=dict(type='str', default="opened", choices=["opened", "closed", "locked", "merged"]), + assignee_ids=dict(type='str'), + reviewer_ids=dict(type='str'), + state=dict(type='str', default="present", choices=["absent", "present"]), + ) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ['description', 'description_path'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ], + required_if=[ + ['state', 'present', ['source_branch', 'target_branch', 'title'], True], + ['state', 'absent', ['source_branch', 'target_branch', 'title'], True], + ], + supports_check_mode=True + ) + + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) + + project = module.params['project'] + source_branch = 
module.params['source_branch'] + target_branch = module.params['target_branch'] + title = module.params['title'] + description = module.params['description'] + labels = module.params['labels'] + description_path = module.params['description_path'] + remove_source_branch = module.params['remove_source_branch'] + state_filter = module.params['state_filter'] + assignee_ids = module.params['assignee_ids'] + reviewer_ids = module.params['reviewer_ids'] + state = module.params['state'] + + gitlab_version = gitlab.__version__ + if LooseVersion(gitlab_version) < LooseVersion('2.3.0'): + module.fail_json(msg="community.general.gitlab_merge_request requires python-gitlab Python module >= 2.3.0 (installed version: [%s])." + " Please upgrade python-gitlab to version 2.3.0 or above." % gitlab_version) + + this_project = find_project(gitlab_instance, project) + if this_project is None: + module.fail_json(msg="Failed to get the project: %s" % project) + + this_gitlab = GitlabMergeRequest(module=module, project=this_project, gitlab_instance=gitlab_instance) + + r_source_branch = this_gitlab.get_branch(source_branch) + if not r_source_branch: + module.fail_json(msg="Source branch {b} not exist.".format(b=r_source_branch)) + + r_target_branch = this_gitlab.get_branch(target_branch) + if not r_target_branch: + module.fail_json(msg="Destination branch {b} not exist.".format(b=r_target_branch)) + + this_mr = this_gitlab.get_mr(title, source_branch, target_branch, state_filter) + + if state == "present": + if description_path: + try: + with open(description_path, 'rb') as f: + description = to_text(f.read(), errors='surrogate_or_strict') + except IOError as e: + module.fail_json(msg='Cannot open {0}: {1}'.format(description_path, e)) + + # sorting necessary in order to properly detect changes, as we don't want to get false positive + # results due to differences in ids ordering; see `mr_has_changed()` + assignee_ids = sorted(this_gitlab.get_user_ids(assignee_ids.split(","))) if 
assignee_ids else [] + reviewer_ids = sorted(this_gitlab.get_user_ids(reviewer_ids.split(","))) if reviewer_ids else [] + labels = sorted(labels.split(",")) if labels else [] + + options = { + "target_branch": target_branch, + "title": title, + "description": description, + "labels": labels, + "remove_source_branch": remove_source_branch, + "reviewer_ids": reviewer_ids, + "assignee_ids": assignee_ids, + } + + if not this_mr: + options["source_branch"] = source_branch + + mr = this_gitlab.create_mr(options) + module.exit_json( + changed=True, msg="Created the Merge Request {t} from branch {s} to branch {d}.".format(t=title, d=target_branch, s=source_branch), + mr=mr.asdict() + ) + else: + if this_gitlab.mr_has_changed(this_mr, options): + mr = this_gitlab.update_mr(this_mr, options) + module.exit_json( + changed=True, msg="Merge Request {t} from branch {s} to branch {d} updated.".format(t=title, d=target_branch, s=source_branch), + mr=mr + ) + else: + module.exit_json( + changed=False, msg="Merge Request {t} from branch {s} to branch {d} already exist".format(t=title, d=target_branch, s=source_branch), + mr=this_mr.asdict() + ) + elif this_mr and state == "absent": + mr = this_gitlab.delete_mr(this_mr) + module.exit_json( + changed=True, msg="Merge Request {t} from branch {s} to branch {d} deleted.".format(t=title, d=target_branch, s=source_branch), + mr=mr + ) + else: + module.exit_json(changed=False, msg="No changes are needed.", mr=this_mr.asdict()) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gitlab_milestone.py b/plugins/modules/gitlab_milestone.py new file mode 100644 index 0000000000..bb4992117c --- /dev/null +++ b/plugins/modules/gitlab_milestone.py @@ -0,0 +1,486 @@ +#!/usr/bin/python + +# Copyright (c) 2023, Gabriele Pongelli (gabriele.pongelli@gmail.com) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ 
import annotations + +DOCUMENTATION = r""" +module: gitlab_milestone +short_description: Creates/updates/deletes GitLab Milestones belonging to project or group +version_added: 8.3.0 +description: + - When a milestone does not exist, it is created. + - When a milestone does exist, its value is updated when the values are different. + - Milestones can be purged. +author: + - "Gabriele Pongelli (@gpongelli)" +requirements: + - python-gitlab python module +extends_documentation_fragment: + - community.general.auth_basic + - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + state: + description: + - Create or delete milestone. + default: present + type: str + choices: ["present", "absent"] + purge: + description: + - When set to V(true), delete all milestone which are not mentioned in the task. + default: false + type: bool + required: false + project: + description: + - The path and name of the project. Either this or O(group) is required. + required: false + type: str + group: + description: + - The path of the group. Either this or O(project) is required. + required: false + type: str + milestones: + description: + - A list of dictionaries that represents gitlab project's or group's milestones. + type: list + elements: dict + required: false + default: [] + suboptions: + title: + description: + - The name of the milestone. + type: str + required: true + due_date: + description: + - Milestone due date in YYYY-MM-DD format. + type: str + required: false + default: null + start_date: + description: + - Milestone start date in YYYY-MM-DD format. + type: str + required: false + default: null + description: + description: + - Milestone's description. 
+ type: str + default: null +""" + + +EXAMPLES = r""" +# same project's task can be executed for group +- name: Create one milestone + community.general.gitlab_milestone: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + milestones: + - title: milestone_one + start_date: "2024-01-04" + state: present + +- name: Create many group milestones + community.general.gitlab_milestone: + api_url: https://gitlab.com + api_token: secret_access_token + group: "group1" + milestones: + - title: milestone_one + start_date: "2024-01-04" + description: this is a milestone + due_date: "2024-02-04" + - title: milestone_two + state: present + +- name: Create many project milestones + community.general.gitlab_milestone: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + milestones: + - title: milestone_one + start_date: "2024-01-04" + description: this is a milestone + due_date: "2024-02-04" + - title: milestone_two + state: present + +- name: Set or update some milestones + community.general.gitlab_milestone: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + milestones: + - title: milestone_one + start_date: "2024-05-04" + state: present + +- name: Add milestone in check mode + community.general.gitlab_milestone: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + milestones: + - title: milestone_one + start_date: "2024-05-04" + check_mode: true + +- name: Delete milestone + community.general.gitlab_milestone: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + milestones: + - title: milestone_one + state: absent + +- name: Purge all milestones + community.general.gitlab_milestone: + api_url: https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + purge: true + +- name: Delete many milestones + community.general.gitlab_milestone: + api_url: 
https://gitlab.com + api_token: secret_access_token + project: "group1/project1" + state: absent + milestones: + - title: milestone-abc123 + - title: milestone-two +""" + +RETURN = r""" +milestones: + description: Four lists of the milestones which were added, updated, removed or exist. + returned: success + type: dict + contains: + added: + description: A list of milestones which were created. + returned: always + type: list + sample: ["abcd", "milestone-one"] + untouched: + description: A list of milestones which exist. + returned: always + type: list + sample: ["defg", "new-milestone"] + removed: + description: A list of milestones which were deleted. + returned: always + type: list + sample: ["defg", "new-milestone"] + updated: + description: A list pre-existing milestones whose values have been set. + returned: always + type: list + sample: ["defg", "new-milestone"] +milestones_obj: + description: API object. + returned: success + type: dict +""" + +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.api import basic_auth_argument_spec + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, gitlab_authentication, ensure_gitlab_package, find_group, find_project +) +from datetime import datetime + + +class GitlabMilestones(object): + + def __init__(self, module, gitlab_instance, group_id, project_id): + self._gitlab = gitlab_instance + self.gitlab_object = group_id if group_id else project_id + self.is_group_milestone = True if group_id else False + self._module = module + + def list_all_milestones(self): + page_nb = 1 + milestones = [] + vars_page = self.gitlab_object.milestones.list(page=page_nb) + while len(vars_page) > 0: + milestones += vars_page + page_nb += 1 + vars_page = self.gitlab_object.milestones.list(page=page_nb) + return milestones + + def create_milestone(self, var_obj): + if self._module.check_mode: + return True, True + + var = { + "title": var_obj.get('title'), + } + 
+ if var_obj.get('description') is not None: + var["description"] = var_obj.get('description') + + if var_obj.get('start_date') is not None: + var["start_date"] = self.check_date(var_obj.get('start_date')) + + if var_obj.get('due_date') is not None: + var["due_date"] = self.check_date(var_obj.get('due_date')) + + _obj = self.gitlab_object.milestones.create(var) + return True, _obj.asdict() + + def update_milestone(self, var_obj): + if self._module.check_mode: + return True, True + _milestone = self.gitlab_object.milestones.get(self.get_milestone_id(var_obj.get('title'))) + + if var_obj.get('description') is not None: + _milestone.description = var_obj.get('description') + + if var_obj.get('start_date') is not None: + _milestone.start_date = var_obj.get('start_date') + + if var_obj.get('due_date') is not None: + _milestone.due_date = var_obj.get('due_date') + + # save returns None + _milestone.save() + return True, _milestone.asdict() + + def get_milestone_id(self, _title): + _milestone_list = self.gitlab_object.milestones.list() + _found = [x for x in _milestone_list if x.title == _title] + if _found: + return _found[0].id + else: + self._module.fail_json(msg="milestone '%s' not found." % _title) + + def check_date(self, _date): + try: + datetime.strptime(_date, '%Y-%m-%d') + except ValueError: + self._module.fail_json(msg="milestone's date '%s' not in correct format." % _date) + return _date + + def delete_milestone(self, var_obj): + if self._module.check_mode: + return True, True + _milestone = self.gitlab_object.milestones.get(self.get_milestone_id(var_obj.get('title'))) + # delete returns None + _milestone.delete() + return True, _milestone.asdict() + + +def compare(requested_milestones, existing_milestones, state): + # we need to do this, because it was determined in a previous version - more or less buggy + # basically it is not necessary and might result in more/other bugs! + # but it is required and only relevant for check mode!! 
+ # logic represents state 'present' when not purge. all other can be derived from that + # untouched => equal in both + # updated => title are equal + # added => title does not exist + untouched = list() + updated = list() + added = list() + + if state == 'present': + _existing_milestones = list() + for item in existing_milestones: + _existing_milestones.append({'title': item.get('title')}) + + for var in requested_milestones: + if var in existing_milestones: + untouched.append(var) + else: + compare_item = {'title': var.get('title')} + if compare_item in _existing_milestones: + updated.append(var) + else: + added.append(var) + + return untouched, updated, added + + +def native_python_main(this_gitlab, purge, requested_milestones, state, module): + change = False + return_value = dict(added=[], updated=[], removed=[], untouched=[]) + return_obj = dict(added=[], updated=[], removed=[]) + + milestones_before = [x.asdict() for x in this_gitlab.list_all_milestones()] + + # filter out and enrich before compare + for item in requested_milestones: + # add defaults when not present + if item.get('description') is None: + item['description'] = "" + if item.get('due_date') is None: + item['due_date'] = None + if item.get('start_date') is None: + item['start_date'] = None + + for item in milestones_before: + # remove field only from server + item.pop('id') + item.pop('iid') + item.pop('created_at') + item.pop('expired') + item.pop('state') + item.pop('updated_at') + item.pop('web_url') + # group milestone has group_id, while project has project_id + if 'group_id' in item: + item.pop('group_id') + if 'project_id' in item: + item.pop('project_id') + + if state == 'present': + add_or_update = [x for x in requested_milestones if x not in milestones_before] + for item in add_or_update: + try: + _rv, _obj = this_gitlab.create_milestone(item) + if _rv: + return_value['added'].append(item) + return_obj['added'].append(_obj) + except Exception: + # create raises exception with 
following error message when milestone already exists + _rv, _obj = this_gitlab.update_milestone(item) + if _rv: + return_value['updated'].append(item) + return_obj['updated'].append(_obj) + + if purge: + # re-fetch + _milestones = this_gitlab.list_all_milestones() + + for item in milestones_before: + _rv, _obj = this_gitlab.delete_milestone(item) + if _rv: + return_value['removed'].append(item) + return_obj['removed'].append(_obj) + + elif state == 'absent': + if not purge: + _milestone_titles_requested = [x['title'] for x in requested_milestones] + remove_requested = [x for x in milestones_before if x['title'] in _milestone_titles_requested] + for item in remove_requested: + _rv, _obj = this_gitlab.delete_milestone(item) + if _rv: + return_value['removed'].append(item) + return_obj['removed'].append(_obj) + else: + for item in milestones_before: + _rv, _obj = this_gitlab.delete_milestone(item) + if _rv: + return_value['removed'].append(item) + return_obj['removed'].append(_obj) + + if module.check_mode: + _untouched, _updated, _added = compare(requested_milestones, milestones_before, state) + return_value = dict(added=_added, updated=_updated, removed=return_value['removed'], untouched=_untouched) + + if any(return_value[x] for x in ['added', 'removed', 'updated']): + change = True + + milestones_after = [x.asdict() for x in this_gitlab.list_all_milestones()] + + return change, return_value, milestones_before, milestones_after, return_obj + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update( + project=dict(type='str'), + group=dict(type='str'), + purge=dict(type='bool', default=False), + milestones=dict(type='list', elements='dict', default=[], + options=dict( + title=dict(type='str', required=True), + description=dict(type='str'), + due_date=dict(type='str'), + start_date=dict(type='str')) + ), + state=dict(type='str', default="present", choices=["absent", "present"]), + ) + + module = 
AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'], + ['project', 'group'], + ], + required_together=[ + ['api_username', 'api_password'], + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'], + ['project', 'group'] + ], + supports_check_mode=True + ) + ensure_gitlab_package(module) + + gitlab_project = module.params['project'] + gitlab_group = module.params['group'] + purge = module.params['purge'] + milestone_list = module.params['milestones'] + state = module.params['state'] + + gitlab_instance = gitlab_authentication(module, min_version='3.2.0') + + # find_project can return None, but the other must exist + gitlab_project_id = find_project(gitlab_instance, gitlab_project) + + # find_group can return None, but the other must exist + gitlab_group_id = find_group(gitlab_instance, gitlab_group) + + # if both not found, module must exist + if not gitlab_project_id and not gitlab_group_id: + if gitlab_project and not gitlab_project_id: + module.fail_json(msg="project '%s' not found." % gitlab_project) + if gitlab_group and not gitlab_group_id: + module.fail_json(msg="group '%s' not found." 
% gitlab_group) + + this_gitlab = GitlabMilestones(module=module, gitlab_instance=gitlab_instance, group_id=gitlab_group_id, + project_id=gitlab_project_id) + + change, raw_return_value, before, after, _obj = native_python_main(this_gitlab, purge, milestone_list, state, + module) + + if not module.check_mode: + raw_return_value['untouched'] = [x for x in before if x in after] + + added = [x.get('title') for x in raw_return_value['added']] + updated = [x.get('title') for x in raw_return_value['updated']] + removed = [x.get('title') for x in raw_return_value['removed']] + untouched = [x.get('title') for x in raw_return_value['untouched']] + return_value = dict(added=added, updated=updated, removed=removed, untouched=untouched) + + module.exit_json(changed=change, milestones=return_value, milestones_obj=_obj) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gitlab_project.py b/plugins/modules/gitlab_project.py index 1ab8ae220c..b745fe9424 100644 --- a/plugins/modules/gitlab_project.py +++ b/plugins/modules/gitlab_project.py @@ -1,129 +1,226 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) # Copyright (c) 2015, Werner Dijkerman (ikben@werner-dijkerman.nl) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: gitlab_project short_description: Creates/updates/deletes GitLab Projects description: - - When the project does not exist in GitLab, it will be created. - - When the project does exists and I(state=absent), the project will be deleted. - - When changes are made to the project, the project will be updated. + - When the project does not exist in GitLab, it is created. 
+ - When the project does exist and O(state=absent), the project is deleted. + - When changes are made to the project, the project is updated. author: - Werner Dijkerman (@dj-wasabi) - Guillaume Martinez (@Lunik) requirements: - - python >= 2.7 - python-gitlab python module extends_documentation_fragment: - community.general.auth_basic - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: - group: - description: - - Id or the full path of the group of which this projects belongs to. - type: str - name: - description: - - The name of the project. - required: true - type: str - path: - description: - - The path of the project you want to create, this will be server_url//path. - - If not supplied, name will be used. - type: str - description: - description: - - An description for the project. - type: str - initialize_with_readme: - description: - - Will initialize the project with a default C(README.md). - - Is only used when the project is created, and ignored otherwise. - type: bool - default: false - version_added: "4.0.0" - issues_enabled: - description: - - Whether you want to create issues or not. - - Possible values are true and false. - type: bool - default: true - merge_requests_enabled: - description: - - If merge requests can be made or not. - - Possible values are true and false. - type: bool - default: true - wiki_enabled: - description: - - If an wiki for this project should be available or not. - type: bool - default: true - snippets_enabled: - description: - - If creating snippets should be available or not. - type: bool - default: true - visibility: - description: - - C(private) Project access must be granted explicitly for each user. - - C(internal) The project can be cloned by any logged in user. - - C(public) The project can be cloned without any authentication. 
- default: private - type: str - choices: ["private", "internal", "public"] - aliases: - - visibility_level - import_url: - description: - - Git repository which will be imported into gitlab. - - GitLab server needs read access to this git repository. - required: false - type: str - state: - description: - - Create or delete project. - - Possible values are present and absent. - default: present - type: str - choices: ["present", "absent"] - merge_method: - description: - - What requirements are placed upon merges. - - Possible values are C(merge), C(rebase_merge) merge commit with semi-linear history, C(ff) fast-forward merges only. - type: str - choices: ["ff", "merge", "rebase_merge"] - default: merge - version_added: "1.0.0" - lfs_enabled: - description: - - Enable Git large file systems to manages large files such - as audio, video, and graphics files. - type: bool - required: false - default: false - version_added: "2.0.0" - username: - description: - - Used to create a personal project under a user's name. - type: str - version_added: "3.3.0" allow_merge_on_skipped_pipeline: description: - Allow merge when skipped pipelines exist. type: bool version_added: "3.4.0" + avatar_path: + description: + - Absolute path image to configure avatar. File size should not exceed 200 kb. + - This option is only used on creation, not for updates. + type: path + version_added: "4.2.0" + build_timeout: + description: + - Maximum number of seconds a CI job can run. + - If not specified on creation, GitLab imposes a default value. + type: int + version_added: "10.6.0" + builds_access_level: + description: + - V(private) means that repository CI/CD is allowed only to project members. + - V(disabled) means that repository CI/CD is disabled. + - V(enabled) means that repository CI/CD is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.2.0" + ci_config_path: + description: + - Custom path to the CI configuration file for this project. 
+ type: str + version_added: "3.7.0" + container_expiration_policy: + description: + - Project cleanup policy for its container registry. + type: dict + suboptions: + cadence: + description: + - How often cleanup should be run. + type: str + choices: ["1d", "7d", "14d", "1month", "3month"] + enabled: + description: + - Enable the cleanup policy. + type: bool + keep_n: + description: + - Number of tags kept per image name. + - V(0) clears the field. + type: int + choices: [0, 1, 5, 10, 25, 50, 100] + older_than: + description: + - Destroy tags older than this. + - V(0d) clears the field. + type: str + choices: ["0d", "7d", "14d", "30d", "90d"] + name_regex: + description: + - Destroy tags matching this regular expression. + type: str + name_regex_keep: + description: + - Keep tags matching this regular expression. + type: str + version_added: "9.3.0" + container_registry_access_level: + description: + - V(private) means that container registry is allowed only to project members. + - V(disabled) means that container registry is disabled. + - V(enabled) means that container registry is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.2.0" + default_branch: + description: + - The default branch name for this project. + - For project creation, this option requires O(initialize_with_readme=true). + - For project update, the branch must exist. + - Supports project's default branch update since community.general 8.0.0. + type: str + version_added: "4.2.0" + description: + description: + - An description for the project. + type: str + environments_access_level: + description: + - V(private) means that deployment to environment is allowed only to project members. + - V(disabled) means that deployment to environment is disabled. + - V(enabled) means that deployment to environment is enabled. 
+ type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.4.0" + feature_flags_access_level: + description: + - V(private) means that feature rollout is allowed only to project members. + - V(disabled) means that feature rollout is disabled. + - V(enabled) means that feature rollout is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.4.0" + forking_access_level: + description: + - V(private) means that repository forks is allowed only to project members. + - V(disabled) means that repository forks are disabled. + - V(enabled) means that repository forks are enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.2.0" + group: + description: + - ID or the full path of the group of which this projects belongs to. + type: str + import_url: + description: + - Git repository which is imported into gitlab. + - GitLab server needs read access to this git repository. + required: false + type: str + infrastructure_access_level: + description: + - V(private) means that configuring infrastructure is allowed only to project members. + - V(disabled) means that configuring infrastructure is disabled. + - V(enabled) means that configuring infrastructure is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.4.0" + initialize_with_readme: + description: + - Initializes the project with a default C(README.md). + - Is only used when the project is created, and ignored otherwise. + type: bool + default: false + version_added: "4.0.0" + issues_access_level: + description: + - V(private) means that accessing issues tab is allowed only to project members. + - V(disabled) means that accessing issues tab is disabled. + - V(enabled) means that accessing issues tab is enabled. + - O(issues_access_level) and O(issues_enabled) are mutually exclusive. 
+ type: str + choices: ["private", "disabled", "enabled"] + version_added: "9.4.0" + issues_enabled: + description: + - Whether you want to create issues or not. + - O(issues_access_level) and O(issues_enabled) are mutually exclusive. + type: bool + default: true + lfs_enabled: + description: + - Enable Git large file systems to manages large files such as audio, video, and graphics files. + type: bool + required: false + default: false + version_added: "2.0.0" + merge_method: + description: + - What requirements are placed upon merges. + - Possible values are V(merge), V(rebase_merge) merge commit with semi-linear history, V(ff) fast-forward merges only. + type: str + choices: ["ff", "merge", "rebase_merge"] + default: merge + version_added: "1.0.0" + merge_requests_enabled: + description: + - If merge requests can be made or not. + type: bool + default: true + model_registry_access_level: + description: + - V(private) means that accessing model registry tab is allowed only to project members. + - V(disabled) means that accessing model registry tab is disabled. + - V(enabled) means that accessing model registry tab is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "9.3.0" + monitor_access_level: + description: + - V(private) means that monitoring health is allowed only to project members. + - V(disabled) means that monitoring health is disabled. + - V(enabled) means that monitoring health is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.4.0" + name: + description: + - The name of the project. + required: true + type: str only_allow_merge_if_all_discussions_are_resolved: description: - All discussions on a merge request (MR) have to be resolved. @@ -139,66 +236,106 @@ options: - Enable GitLab package repository. type: bool version_added: "3.4.0" + pages_access_level: + description: + - V(private) means that accessing pages tab is allowed only to project members. 
+ - V(disabled) means that accessing pages tab is disabled. + - V(enabled) means that accessing pages tab is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "9.3.0" + path: + description: + - The path of the project you want to create, this is server_url/O(group)/O(path). + - If not supplied, O(name) is used. + type: str + releases_access_level: + description: + - V(private) means that accessing release is allowed only to project members. + - V(disabled) means that accessing release is disabled. + - V(enabled) means that accessing release is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.4.0" remove_source_branch_after_merge: description: - Remove the source branch after merge. type: bool version_added: "3.4.0" + repository_access_level: + description: + - V(private) means that accessing repository is allowed only to project members. + - V(disabled) means that accessing repository is disabled. + - V(enabled) means that accessing repository is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "9.3.0" + security_and_compliance_access_level: + description: + - V(private) means that accessing security and complicance tab is allowed only to project members. + - V(disabled) means that accessing security and complicance tab is disabled. + - V(enabled) means that accessing security and complicance tab is enabled. + type: str + choices: ["private", "disabled", "enabled"] + version_added: "6.4.0" + service_desk_enabled: + description: + - Enable Service Desk. + type: bool + version_added: "9.3.0" + shared_runners_enabled: + description: + - Enable shared runners for this project. + type: bool + version_added: "3.7.0" + snippets_enabled: + description: + - If creating snippets should be available or not. + type: bool + default: true squash_option: description: - Squash commits when merging. 
type: str choices: ["never", "always", "default_off", "default_on"] version_added: "3.4.0" - ci_config_path: + state: description: - - Custom path to the CI configuration file for this project. + - Create or delete project. + - Possible values are present and absent. + default: present type: str - version_added: "3.7.0" - shared_runners_enabled: + choices: ["present", "absent"] + topics: description: - - Enable shared runners for this project. + - A topic or list of topics to be assigned to a project. + - It is compatible with old GitLab server releases (versions before 14, correspond to C(tag_list)). + type: list + elements: str + version_added: "6.6.0" + username: + description: + - Used to create a personal project under a user's name. + type: str + version_added: "3.3.0" + visibility: + description: + - V(private) Project access must be granted explicitly for each user. + - V(internal) The project can be cloned by any logged in user. + - V(public) The project can be cloned without any authentication. + default: private + type: str + choices: ["private", "internal", "public"] + aliases: + - visibility_level + wiki_enabled: + description: + - If an wiki for this project should be available or not. type: bool - version_added: "3.7.0" - avatar_path: - description: - - Absolute path image to configure avatar. File size should not exceed 200 kb. - - This option is only used on creation, not for updates. - type: path - version_added: "4.2.0" - default_branch: - description: - - Default branch name for a new project. - - This option is only used on creation, not for updates. This is also only used if I(initialize_with_readme=true). - type: str - version_added: "4.2.0" - builds_access_level: - description: - - C(private) means that repository CI/CD is allowed only to project members. - - C(disabled) means that repository CI/CD is disabled. - - C(enabled) means that repository CI/CD is enabled. 
- type: str - choices: ["private", "disabled", "enabled"] - version_added: "6.2.0" - forking_access_level: - description: - - C(private) means that repository forks is allowed only to project members. - - C(disabled) means that repository forks are disabled. - - C(enabled) means that repository forks are enabled. - type: str - choices: ["private", "disabled", "enabled"] - version_added: "6.2.0" - container_registry_access_level: - description: - - C(private) means that container registry is allowed only to project members. - - C(disabled) means that container registry is disabled. - - C(enabled) means that container registry is enabled. - type: str - choices: ["private", "disabled", "enabled"] - version_added: "6.2.0" -''' + default: true +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create GitLab Project community.general.gitlab_project: api_url: https://gitlab.example.com/ @@ -210,7 +347,6 @@ EXAMPLES = r''' community.general.gitlab_project: api_url: https://gitlab.example.com/ api_token: "{{ access_token }}" - validate_certs: false name: my_first_project state: absent delegate_to: localhost @@ -244,9 +380,9 @@ EXAMPLES = r''' api_password: "{{ initial_root_password }}" name: my_second_project group: "10481470" -''' +""" -RETURN = r''' +RETURN = r""" msg: description: Success or failure message. returned: always @@ -254,12 +390,12 @@ msg: sample: "Success" result: - description: json parsed response from the server. + description: JSON-parsed response from the server. returned: always type: dict error: - description: the error message returned by the GitLab API. + description: The error message returned by the GitLab API. returned: failed type: str sample: "400: path is already in use" @@ -268,7 +404,7 @@ project: description: API object. 
returned: always type: dict -''' +""" from ansible.module_utils.api import basic_auth_argument_spec @@ -276,9 +412,11 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, find_group, find_project, gitlab_authentication, gitlab, ensure_gitlab_package + auth_argument_spec, find_group, find_project, gitlab_authentication, gitlab ) +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion + class GitLabProject(object): def __init__(self, module, gitlab_instance): @@ -291,32 +429,55 @@ class GitLabProject(object): @param namespace Namespace Object (User or Group) @param options Options of the project ''' - def create_or_update_project(self, project_name, namespace, options): + def create_or_update_project(self, module, project_name, namespace, options): changed = False project_options = { - 'name': project_name, - 'description': options['description'], - 'issues_enabled': options['issues_enabled'], - 'merge_requests_enabled': options['merge_requests_enabled'], - 'merge_method': options['merge_method'], - 'wiki_enabled': options['wiki_enabled'], - 'snippets_enabled': options['snippets_enabled'], - 'visibility': options['visibility'], - 'lfs_enabled': options['lfs_enabled'], 'allow_merge_on_skipped_pipeline': options['allow_merge_on_skipped_pipeline'], + 'builds_access_level': options['builds_access_level'], + 'build_timeout': options['build_timeout'], + 'ci_config_path': options['ci_config_path'], + 'container_expiration_policy': options['container_expiration_policy'], + 'container_registry_access_level': options['container_registry_access_level'], + 'description': options['description'], + 'environments_access_level': options['environments_access_level'], + 'feature_flags_access_level': options['feature_flags_access_level'], + 'forking_access_level': 
options['forking_access_level'], + 'infrastructure_access_level': options['infrastructure_access_level'], + 'issues_access_level': options['issues_access_level'], + 'issues_enabled': options['issues_enabled'], + 'lfs_enabled': options['lfs_enabled'], + 'merge_method': options['merge_method'], + 'merge_requests_enabled': options['merge_requests_enabled'], + 'model_registry_access_level': options['model_registry_access_level'], + 'monitor_access_level': options['monitor_access_level'], + 'name': project_name, 'only_allow_merge_if_all_discussions_are_resolved': options['only_allow_merge_if_all_discussions_are_resolved'], 'only_allow_merge_if_pipeline_succeeds': options['only_allow_merge_if_pipeline_succeeds'], 'packages_enabled': options['packages_enabled'], + 'pages_access_level': options['pages_access_level'], + 'releases_access_level': options['releases_access_level'], 'remove_source_branch_after_merge': options['remove_source_branch_after_merge'], - 'squash_option': options['squash_option'], - 'ci_config_path': options['ci_config_path'], + 'repository_access_level': options['repository_access_level'], + 'security_and_compliance_access_level': options['security_and_compliance_access_level'], + 'service_desk_enabled': options['service_desk_enabled'], 'shared_runners_enabled': options['shared_runners_enabled'], - 'builds_access_level': options['builds_access_level'], - 'forking_access_level': options['forking_access_level'], - 'container_registry_access_level': options['container_registry_access_level'], + 'snippets_enabled': options['snippets_enabled'], + 'squash_option': options['squash_option'], + 'visibility': options['visibility'], + 'wiki_enabled': options['wiki_enabled'], } + + # topics was introduced on gitlab >=14 and replace tag_list. We get current gitlab version + # and check if less than 14. 
If yes we use tag_list instead topics + if LooseVersion(self._gitlab.version()[0]) < LooseVersion("14"): + project_options['tag_list'] = options['topics'] + else: + project_options['topics'] = options['topics'] + # Because we have already call userExists in main() if self.project_object is None: + if options['default_branch'] and not options['initialize_with_readme']: + module.fail_json(msg="Param default_branch needs param initialize_with_readme set to true") project_options.update({ 'path': options['path'], 'import_url': options['import_url'], @@ -338,6 +499,8 @@ class GitLabProject(object): changed = True else: + if options['default_branch']: + project_options['default_branch'] = options['default_branch'] changed, project = self.update_project(self.project_object, project_options) self.project_object = project @@ -348,7 +511,7 @@ class GitLabProject(object): try: project.save() except Exception as e: - self._module.fail_json(msg="Failed update project: %s " % e) + self._module.fail_json(msg="Failed to update project: %s " % e) return True return False @@ -361,6 +524,8 @@ class GitLabProject(object): return True arguments['namespace_id'] = namespace.id + if 'container_expiration_policy' in arguments: + arguments['container_expiration_policy_attributes'] = arguments['container_expiration_policy'] try: project = self._gitlab.projects.create(arguments) except (gitlab.exceptions.GitlabCreateError) as e: @@ -372,11 +537,7 @@ class GitLabProject(object): @param arguments Attributes of the project ''' def get_options_with_value(self, arguments): - ret_arguments = dict() - for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - ret_arguments[arg_key] = arg_value - + ret_arguments = {k: v for k, v in arguments.items() if v is not None} return ret_arguments ''' @@ -387,9 +548,22 @@ class GitLabProject(object): changed = False for arg_key, arg_value in arguments.items(): - if arguments[arg_key] is not None: - if getattr(project, arg_key) != 
arguments[arg_key]: - setattr(project, arg_key, arguments[arg_key]) + if arg_value is not None: + if getattr(project, arg_key, None) != arg_value: + if arg_key == 'container_expiration_policy': + old_val = getattr(project, arg_key, {}) + final_val = {key: value for key, value in arg_value.items() if value is not None} + + if final_val.get('older_than') == '0d': + final_val['older_than'] = None + if final_val.get('keep_n') == 0: + final_val['keep_n'] = None + + if all(old_val.get(key) == value for key, value in final_val.items()): + continue + setattr(project, 'container_expiration_policy_attributes', final_val) + else: + setattr(project, arg_key, arg_value) changed = True return (changed, project) @@ -419,34 +593,55 @@ def main(): argument_spec = basic_auth_argument_spec() argument_spec.update(auth_argument_spec()) argument_spec.update(dict( - group=dict(type='str'), - name=dict(type='str', required=True), - path=dict(type='str'), - description=dict(type='str'), - initialize_with_readme=dict(type='bool', default=False), - default_branch=dict(type='str'), - issues_enabled=dict(type='bool', default=True), - merge_requests_enabled=dict(type='bool', default=True), - merge_method=dict(type='str', default='merge', choices=["merge", "rebase_merge", "ff"]), - wiki_enabled=dict(type='bool', default=True), - snippets_enabled=dict(default=True, type='bool'), - visibility=dict(type='str', default="private", choices=["internal", "private", "public"], aliases=["visibility_level"]), - import_url=dict(type='str'), - state=dict(type='str', default="present", choices=["absent", "present"]), - lfs_enabled=dict(default=False, type='bool'), - username=dict(type='str'), allow_merge_on_skipped_pipeline=dict(type='bool'), + avatar_path=dict(type='path'), + builds_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + build_timeout=dict(type='int'), + ci_config_path=dict(type='str'), + container_expiration_policy=dict(type='dict', options=dict( + 
cadence=dict(type='str', choices=["1d", "7d", "14d", "1month", "3month"]), + enabled=dict(type='bool'), + keep_n=dict(type='int', choices=[0, 1, 5, 10, 25, 50, 100]), + older_than=dict(type='str', choices=["0d", "7d", "14d", "30d", "90d"]), + name_regex=dict(type='str'), + name_regex_keep=dict(type='str'), + )), + container_registry_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + default_branch=dict(type='str'), + description=dict(type='str'), + environments_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + feature_flags_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + forking_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + group=dict(type='str'), + import_url=dict(type='str'), + infrastructure_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + initialize_with_readme=dict(type='bool', default=False), + issues_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + issues_enabled=dict(type='bool', default=True), + lfs_enabled=dict(default=False, type='bool'), + merge_method=dict(type='str', default='merge', choices=["merge", "rebase_merge", "ff"]), + merge_requests_enabled=dict(type='bool', default=True), + model_registry_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + monitor_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + name=dict(type='str', required=True), only_allow_merge_if_all_discussions_are_resolved=dict(type='bool'), only_allow_merge_if_pipeline_succeeds=dict(type='bool'), packages_enabled=dict(type='bool'), + pages_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + path=dict(type='str'), + releases_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), remove_source_branch_after_merge=dict(type='bool'), - squash_option=dict(type='str', choices=['never', 'always', 'default_off', 'default_on']), - 
ci_config_path=dict(type='str'), + repository_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + security_and_compliance_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + service_desk_enabled=dict(type='bool'), shared_runners_enabled=dict(type='bool'), - avatar_path=dict(type='path'), - builds_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - forking_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), - container_registry_access_level=dict(type='str', choices=['private', 'disabled', 'enabled']), + snippets_enabled=dict(default=True, type='bool'), + squash_option=dict(type='str', choices=['never', 'always', 'default_off', 'default_on']), + state=dict(type='str', default="present", choices=["absent", "present"]), + topics=dict(type='list', elements='str'), + username=dict(type='str'), + visibility=dict(type='str', default="private", choices=["internal", "private", "public"], aliases=["visibility_level"]), + wiki_enabled=dict(type='bool', default=True), )) module = AnsibleModule( @@ -458,6 +653,7 @@ def main(): ['api_token', 'api_oauth_token'], ['api_token', 'api_job_token'], ['group', 'username'], + ['issues_access_level', 'issues_enabled'], ], required_together=[ ['api_username', 'api_password'], @@ -467,41 +663,52 @@ def main(): ], supports_check_mode=True, ) - ensure_gitlab_package(module) - group_identifier = module.params['group'] - project_name = module.params['name'] - project_path = module.params['path'] - project_description = module.params['description'] - initialize_with_readme = module.params['initialize_with_readme'] - issues_enabled = module.params['issues_enabled'] - merge_requests_enabled = module.params['merge_requests_enabled'] - merge_method = module.params['merge_method'] - wiki_enabled = module.params['wiki_enabled'] - snippets_enabled = module.params['snippets_enabled'] - visibility = module.params['visibility'] - import_url = 
module.params['import_url'] - state = module.params['state'] - lfs_enabled = module.params['lfs_enabled'] - username = module.params['username'] + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) + allow_merge_on_skipped_pipeline = module.params['allow_merge_on_skipped_pipeline'] + avatar_path = module.params['avatar_path'] + builds_access_level = module.params['builds_access_level'] + build_timeout = module.params['build_timeout'] + ci_config_path = module.params['ci_config_path'] + container_expiration_policy = module.params['container_expiration_policy'] + container_registry_access_level = module.params['container_registry_access_level'] + default_branch = module.params['default_branch'] + environments_access_level = module.params['environments_access_level'] + feature_flags_access_level = module.params['feature_flags_access_level'] + forking_access_level = module.params['forking_access_level'] + group_identifier = module.params['group'] + import_url = module.params['import_url'] + infrastructure_access_level = module.params['infrastructure_access_level'] + initialize_with_readme = module.params['initialize_with_readme'] + issues_access_level = module.params['issues_access_level'] + issues_enabled = module.params['issues_enabled'] + lfs_enabled = module.params['lfs_enabled'] + merge_method = module.params['merge_method'] + merge_requests_enabled = module.params['merge_requests_enabled'] + model_registry_access_level = module.params['model_registry_access_level'] + monitor_access_level = module.params['monitor_access_level'] only_allow_merge_if_all_discussions_are_resolved = module.params['only_allow_merge_if_all_discussions_are_resolved'] only_allow_merge_if_pipeline_succeeds = module.params['only_allow_merge_if_pipeline_succeeds'] packages_enabled = module.params['packages_enabled'] + pages_access_level = module.params['pages_access_level'] + project_description = module.params['description'] + project_name = 
module.params['name'] + project_path = module.params['path'] + releases_access_level = module.params['releases_access_level'] remove_source_branch_after_merge = module.params['remove_source_branch_after_merge'] - squash_option = module.params['squash_option'] - ci_config_path = module.params['ci_config_path'] + repository_access_level = module.params['repository_access_level'] + security_and_compliance_access_level = module.params['security_and_compliance_access_level'] + service_desk_enabled = module.params['service_desk_enabled'] shared_runners_enabled = module.params['shared_runners_enabled'] - avatar_path = module.params['avatar_path'] - default_branch = module.params['default_branch'] - builds_access_level = module.params['builds_access_level'] - forking_access_level = module.params['forking_access_level'] - container_registry_access_level = module.params['container_registry_access_level'] - - if default_branch and not initialize_with_readme: - module.fail_json(msg="Param default_branch need param initialize_with_readme set to true") - - gitlab_instance = gitlab_authentication(module) + snippets_enabled = module.params['snippets_enabled'] + squash_option = module.params['squash_option'] + state = module.params['state'] + topics = module.params['topics'] + username = module.params['username'] + visibility = module.params['visibility'] + wiki_enabled = module.params['wiki_enabled'] # Set project_path to project_name if it is empty. 
if project_path is None: @@ -514,7 +721,7 @@ def main(): if group_identifier: group = find_group(gitlab_instance, group_identifier) if group is None: - module.fail_json(msg="Failed to create project: group %s doesn't exists" % group_identifier) + module.fail_json(msg="Failed to create project: group %s doesn't exist" % group_identifier) namespace_id = group.id else: @@ -540,35 +747,49 @@ def main(): if project_exists: gitlab_project.delete_project() module.exit_json(changed=True, msg="Successfully deleted project %s" % project_name) - module.exit_json(changed=False, msg="Project deleted or does not exists") + module.exit_json(changed=False, msg="Project deleted or does not exist") if state == 'present': - if gitlab_project.create_or_update_project(project_name, namespace, { - "path": project_path, - "description": project_description, - "initialize_with_readme": initialize_with_readme, - "default_branch": default_branch, - "issues_enabled": issues_enabled, - "merge_requests_enabled": merge_requests_enabled, - "merge_method": merge_method, - "wiki_enabled": wiki_enabled, - "snippets_enabled": snippets_enabled, - "visibility": visibility, - "import_url": import_url, - "lfs_enabled": lfs_enabled, + if gitlab_project.create_or_update_project(module, project_name, namespace, { "allow_merge_on_skipped_pipeline": allow_merge_on_skipped_pipeline, + "avatar_path": avatar_path, + "builds_access_level": builds_access_level, + "build_timeout": build_timeout, + "ci_config_path": ci_config_path, + "container_expiration_policy": container_expiration_policy, + "container_registry_access_level": container_registry_access_level, + "default_branch": default_branch, + "description": project_description, + "environments_access_level": environments_access_level, + "feature_flags_access_level": feature_flags_access_level, + "forking_access_level": forking_access_level, + "import_url": import_url, + "infrastructure_access_level": infrastructure_access_level, + "initialize_with_readme": 
initialize_with_readme, + "issues_access_level": issues_access_level, + "issues_enabled": issues_enabled, + "lfs_enabled": lfs_enabled, + "merge_method": merge_method, + "merge_requests_enabled": merge_requests_enabled, + "model_registry_access_level": model_registry_access_level, + "monitor_access_level": monitor_access_level, "only_allow_merge_if_all_discussions_are_resolved": only_allow_merge_if_all_discussions_are_resolved, "only_allow_merge_if_pipeline_succeeds": only_allow_merge_if_pipeline_succeeds, "packages_enabled": packages_enabled, + "pages_access_level": pages_access_level, + "path": project_path, + "releases_access_level": releases_access_level, "remove_source_branch_after_merge": remove_source_branch_after_merge, - "squash_option": squash_option, - "ci_config_path": ci_config_path, + "repository_access_level": repository_access_level, + "security_and_compliance_access_level": security_and_compliance_access_level, + "service_desk_enabled": service_desk_enabled, "shared_runners_enabled": shared_runners_enabled, - "avatar_path": avatar_path, - "builds_access_level": builds_access_level, - "forking_access_level": forking_access_level, - "container_registry_access_level": container_registry_access_level, + "snippets_enabled": snippets_enabled, + "squash_option": squash_option, + "topics": topics, + "visibility": visibility, + "wiki_enabled": wiki_enabled, }): module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name, project=gitlab_project.project_object._attrs) diff --git a/plugins/modules/gitlab_project_access_token.py b/plugins/modules/gitlab_project_access_token.py new file mode 100644 index 0000000000..27e3b07129 --- /dev/null +++ b/plugins/modules/gitlab_project_access_token.py @@ -0,0 +1,333 @@ +#!/usr/bin/python + +# Copyright (c) 2024, Zoran Krleza (zoran.krleza@true-north.hr) +# Based on code: +# Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) +# Copyright (c) 2018, Marcus Watkins +# Copyright 
(c) 2013, Phillip Gentry
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import annotations
+
+DOCUMENTATION = r"""
+module: gitlab_project_access_token
+short_description: Manages GitLab project access tokens
+version_added: 8.4.0
+description:
+  - Creates and revokes project access tokens.
+author:
+  - Zoran Krleza (@pixslx)
+requirements:
+  - python-gitlab >= 3.1.0
+extends_documentation_fragment:
+  - community.general.auth_basic
+  - community.general.gitlab
+  - community.general.attributes
+notes:
+  - Access tokens can not be changed. If a parameter needs to be changed, an access token has to be recreated. Whether tokens
+    are recreated or not is controlled by the O(recreate) option, which defaults to V(never).
+  - Token string is contained in the result only when access token is created or recreated. It can not be fetched afterwards.
+  - Token matching is done by comparing O(name) option.
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+  project:
+    description:
+      - ID or full path of project in the form of group/name.
+    required: true
+    type: str
+  name:
+    description:
+      - Access token's name.
+    required: true
+    type: str
+  scopes:
+    description:
+      - Scope of the access token.
+      - The values V(manage_runner) and V(self_rotate) were added in community.general 11.3.0.
+    required: true
+    type: list
+    elements: str
+    aliases: ["scope"]
+    choices:
+      - api
+      - read_api
+      - read_registry
+      - write_registry
+      - read_repository
+      - write_repository
+      - create_runner
+      - manage_runner
+      - ai_features
+      - k8s_proxy
+      - self_rotate
+  access_level:
+    description:
+      - Access level of the access token.
+      - The value V(planner) was added in community.general 11.3.0.
+ type: str + default: maintainer + choices: ["guest", "planner", "reporter", "developer", "maintainer", "owner"] + expires_at: + description: + - Expiration date of the access token in C(YYYY-MM-DD) format. + - Make sure to quote this value in YAML to ensure it is kept as a string and not interpreted as a YAML date. + type: str + required: true + recreate: + description: + - Whether the access token is recreated if it already exists. + - When V(never) the token is never recreated. + - When V(always) the token is always recreated. + - When V(state_change) the token is recreated if there is a difference between desired state and actual state. + type: str + choices: ["never", "always", "state_change"] + default: never + state: + description: + - When V(present) the access token is added to the project if it does not exist. + - When V(absent) it is removed from the project if it exists. + default: present + type: str + choices: ["present", "absent"] +""" + +EXAMPLES = r""" +- name: "Creating a project access token" + community.general.gitlab_project_access_token: + api_url: https://gitlab.example.com/ + api_token: "somegitlabapitoken" + project: "my_group/my_project" + name: "project_token" + expires_at: "2024-12-31" + access_level: developer + scopes: + - api + - read_api + - read_repository + - write_repository + state: present + +- name: "Revoking a project access token" + community.general.gitlab_project_access_token: + api_url: https://gitlab.example.com/ + api_token: "somegitlabapitoken" + project: "my_group/my_project" + name: "project_token" + expires_at: "2024-12-31" + scopes: + - api + - read_api + - read_repository + - write_repository + state: absent + +- name: "Change (recreate) existing token if its actual state is different than desired state" + community.general.gitlab_project_access_token: + api_url: https://gitlab.example.com/ + api_token: "somegitlabapitoken" + project: "my_group/my_project" + name: "project_token" + expires_at: "2024-12-31" + 
scopes: + - api + - read_api + - read_repository + - write_repository + recreate: state_change + state: present +""" + +RETURN = r""" +access_token: + description: + - API object. + - Only contains the value of the token if the token was created or recreated. + returned: success and O(state=present) + type: dict +""" + +from datetime import datetime + +from ansible.module_utils.api import basic_auth_argument_spec +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.general.plugins.module_utils.gitlab import ( + auth_argument_spec, find_project, gitlab_authentication, gitlab +) + +ACCESS_LEVELS = dict(guest=10, planner=15, reporter=20, developer=30, maintainer=40, owner=50) + + +class GitLabProjectAccessToken(object): + def __init__(self, module, gitlab_instance): + self._module = module + self._gitlab = gitlab_instance + self.access_token_object = None + + ''' + @param project Project Object + @param arguments Attributes of the access_token + ''' + def create_access_token(self, project, arguments): + changed = False + if self._module.check_mode: + return True + + try: + self.access_token_object = project.access_tokens.create(arguments) + changed = True + except (gitlab.exceptions.GitlabCreateError) as e: + self._module.fail_json(msg="Failed to create access token: %s " % to_native(e)) + + return changed + + ''' + @param project Project object + @param name of the access token + ''' + def find_access_token(self, project, name): + access_tokens = [x for x in project.access_tokens.list(all=True) if not getattr(x, 'revoked', False)] + for access_token in access_tokens: + if access_token.name == name: + self.access_token_object = access_token + return False + return False + + def revoke_access_token(self): + if self._module.check_mode: + return True + + changed = False + try: + self.access_token_object.delete() + changed = True + except 
(gitlab.exceptions.GitlabCreateError) as e: + self._module.fail_json(msg="Failed to revoke access token: %s " % to_native(e)) + + return changed + + def access_tokens_equal(self): + if self.access_token_object.name != self._module.params['name']: + return False + if self.access_token_object.scopes != self._module.params['scopes']: + return False + if self.access_token_object.access_level != ACCESS_LEVELS[self._module.params['access_level']]: + return False + if self.access_token_object.expires_at != self._module.params['expires_at']: + return False + return True + + +def main(): + argument_spec = basic_auth_argument_spec() + argument_spec.update(auth_argument_spec()) + argument_spec.update(dict( + state=dict(type='str', default="present", choices=["absent", "present"]), + project=dict(type='str', required=True), + name=dict(type='str', required=True), + scopes=dict(type='list', + required=True, + aliases=['scope'], + elements='str', + choices=['api', + 'read_api', + 'read_registry', + 'write_registry', + 'read_repository', + 'write_repository', + 'create_runner', + 'manage_runner', + 'ai_features', + 'k8s_proxy', + 'self_rotate']), + access_level=dict(type='str', default='maintainer', choices=['guest', 'planner', 'reporter', 'developer', 'maintainer', 'owner']), + expires_at=dict(type='str', required=True), + recreate=dict(type='str', default='never', choices=['never', 'always', 'state_change']) + )) + + module = AnsibleModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['api_username', 'api_token'], + ['api_username', 'api_oauth_token'], + ['api_username', 'api_job_token'], + ['api_token', 'api_oauth_token'], + ['api_token', 'api_job_token'] + ], + required_together=[ + ['api_username', 'api_password'] + ], + required_one_of=[ + ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'] + ], + supports_check_mode=True + ) + + state = module.params['state'] + project_identifier = module.params['project'] + name = module.params['name'] + scopes = 
module.params['scopes']
+    access_level_str = module.params['access_level']
+    expires_at = module.params['expires_at']
+    recreate = module.params['recreate']
+
+    access_level = ACCESS_LEVELS[access_level_str]
+
+    try:
+        datetime.strptime(expires_at, '%Y-%m-%d')
+    except ValueError:
+        module.fail_json(msg="Argument expires_at is not in required format YYYY-MM-DD")
+
+    gitlab_instance = gitlab_authentication(module)
+
+    gitlab_access_token = GitLabProjectAccessToken(module, gitlab_instance)
+
+    project = find_project(gitlab_instance, project_identifier)
+    if project is None:
+        module.fail_json(msg="Failed to create access token: project %s does not exist" % project_identifier)
+
+    gitlab_access_token_exists = False
+    gitlab_access_token.find_access_token(project, name)
+    if gitlab_access_token.access_token_object is not None:
+        gitlab_access_token_exists = True
+
+    if state == 'absent':
+        if gitlab_access_token_exists:
+            gitlab_access_token.revoke_access_token()
+            module.exit_json(changed=True, msg="Successfully deleted access token %s" % name)
+        else:
+            module.exit_json(changed=False, msg="Access token does not exist")
+
+    if state == 'present':
+        if gitlab_access_token_exists:
+            if gitlab_access_token.access_tokens_equal():
+                if recreate == 'always':
+                    gitlab_access_token.revoke_access_token()
+                    gitlab_access_token.create_access_token(project, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at})
+                    module.exit_json(changed=True, msg="Successfully recreated access token", access_token=gitlab_access_token.access_token_object._attrs)
+                else:
+                    module.exit_json(changed=False, msg="Access token already exists", access_token=gitlab_access_token.access_token_object._attrs)
+            else:
+                if recreate == 'never':
+                    module.fail_json(msg="Access token already exists and its state is different. 
It can not be updated without recreating.") + else: + gitlab_access_token.revoke_access_token() + gitlab_access_token.create_access_token(project, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at}) + module.exit_json(changed=True, msg="Successfully recreated access token", access_token=gitlab_access_token.access_token_object._attrs) + else: + gitlab_access_token.create_access_token(project, {'name': name, 'scopes': scopes, 'access_level': access_level, 'expires_at': expires_at}) + if module.check_mode: + module.exit_json(changed=True, msg="Successfully created access token", access_token={}) + else: + module.exit_json(changed=True, msg="Successfully created access token", access_token=gitlab_access_token.access_token_object._attrs) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/gitlab_project_badge.py b/plugins/modules/gitlab_project_badge.py index 5b1a8d3f1c..8d81765f99 100644 --- a/plugins/modules/gitlab_project_badge.py +++ b/plugins/modules/gitlab_project_badge.py @@ -1,15 +1,12 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2022, Guillaume MARTINEZ (lunik@tiwabbit.fr) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: gitlab_project_badge short_description: Manage project badges on GitLab Server version_added: 6.1.0 @@ -39,8 +36,8 @@ options: state: description: - State of the badge in the project. - - On C(present), it adds a badge to a GitLab project. - - On C(absent), it removes a badge from a GitLab project. + - On V(present), it adds a badge to a GitLab project. + - On V(absent), it removes a badge from a GitLab project. 
choices: ['present', 'absent'] default: 'present' type: str @@ -57,9 +54,9 @@ options: - A badge is identified by this URL. required: true type: str -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Add a badge to a GitLab Project community.general.gitlab_project_badge: api_url: 'https://example.gitlab.com' @@ -77,12 +74,12 @@ EXAMPLES = r''' state: absent link_url: 'https://example.gitlab.com/%{project_path}' image_url: 'https://example.gitlab.com/%{project_path}/badges/%{default_branch}/pipeline.svg' -''' +""" -RETURN = ''' +RETURN = r""" badge: description: The badge information. - returned: when I(state=present) + returned: when O(state=present) type: dict sample: id: 1 @@ -91,13 +88,13 @@ badge: rendered_link_url: 'http://example.com/ci_status.svg?project=example-org/example-project&ref=master' rendered_image_url: 'https://shields.io/my/badge' kind: project -''' +""" from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, gitlab_authentication, find_project, ensure_gitlab_package + auth_argument_spec, gitlab_authentication, find_project, list_all_kwargs ) @@ -105,7 +102,7 @@ def present_strategy(module, gl, project, wished_badge): changed = False existing_badge = None - for badge in project.badges.list(iterator=True): + for badge in project.badges.list(**list_all_kwargs): if badge.image_url == wished_badge["image_url"]: existing_badge = badge break @@ -135,7 +132,7 @@ def absent_strategy(module, gl, project, wished_badge): changed = False existing_badge = None - for badge in project.badges.list(iterator=True): + for badge in project.badges.list(**list_all_kwargs): if badge.image_url == wished_badge["image_url"]: existing_badge = badge break @@ -159,13 +156,12 @@ state_strategy = { def core(module): - ensure_gitlab_package(module) + # check prerequisites and connect to gitlab server + gl = 
gitlab_authentication(module) gitlab_project = module.params['project'] state = module.params['state'] - gl = gitlab_authentication(module) - project = find_project(gl, gitlab_project) # project doesn't exist if not project: diff --git a/plugins/modules/gitlab_project_members.py b/plugins/modules/gitlab_project_members.py index 811033f312..c496d4aae5 100644 --- a/plugins/modules/gitlab_project_members.py +++ b/plugins/modules/gitlab_project_members.py @@ -1,16 +1,13 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2021, Sergey Mikhaltsov # Copyright (c) 2020, Zainab Alsaffar # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: gitlab_project_members short_description: Manage project members on GitLab Server version_added: 2.2.0 @@ -25,6 +22,13 @@ requirements: extends_documentation_fragment: - community.general.auth_basic - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: project: @@ -35,21 +39,22 @@ options: gitlab_user: description: - A username or a list of usernames to add to/remove from the GitLab project. - - Mutually exclusive with I(gitlab_users_access). + - Mutually exclusive with O(gitlab_users_access). type: list elements: str access_level: description: - The access level for the user. - - Required if I(state=present), user state is set to present. + - Required if O(state=present), user state is set to present. + - V(owner) was added in community.general 10.6.0. 
type: str - choices: ['guest', 'reporter', 'developer', 'maintainer'] + choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] gitlab_users_access: description: - Provide a list of user to access level mappings. - Every dictionary in this list specifies a user (by username) and the access level the user should have. - - Mutually exclusive with I(gitlab_user) and I(access_level). - - Use together with I(purge_users) to remove all users not specified here from the project. + - Mutually exclusive with O(gitlab_user) and O(access_level). + - Use together with O(purge_users) to remove all users not specified here from the project. type: list elements: dict suboptions: @@ -60,33 +65,33 @@ options: access_level: description: - The access level for the user. - - Required if I(state=present), user state is set to present. + - Required if O(state=present), user state is set to present. + - V(owner) was added in community.general 10.6.0. type: str - choices: ['guest', 'reporter', 'developer', 'maintainer'] + choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] required: true version_added: 3.7.0 state: description: - State of the member in the project. - - On C(present), it adds a user to a GitLab project. - - On C(absent), it removes a user from a GitLab project. + - On V(present), it adds a user to a GitLab project. + - On V(absent), it removes a user from a GitLab project. choices: ['present', 'absent'] default: 'present' type: str purge_users: description: - - Adds/remove users of the given access_level to match the given I(gitlab_user)/I(gitlab_users_access) list. - If omitted do not purge orphaned members. - - Is only used when I(state=present). + - Adds/remove users of the given access_level to match the given O(gitlab_user)/O(gitlab_users_access) list. If omitted + do not purge orphaned members. + - Is only used when O(state=present). + - V(owner) was added in community.general 10.6.0. 
type: list elements: str - choices: ['guest', 'reporter', 'developer', 'maintainer'] + choices: ['guest', 'reporter', 'developer', 'maintainer', 'owner'] version_added: 3.7.0 -notes: - - Supports C(check_mode). -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Add a user to a GitLab Project community.general.gitlab_project_members: api_url: 'https://gitlab.example.com' @@ -101,7 +106,6 @@ EXAMPLES = r''' community.general.gitlab_project_members: api_url: 'https://gitlab.example.com' api_token: 'Your-Private-Token' - validate_certs: false project: projectname gitlab_user: username state: absent @@ -136,7 +140,7 @@ EXAMPLES = r''' project: projectname gitlab_user: username access_level: developer - pruge_users: developer + purge_users: developer state: present - name: Remove a list of Users with Dedicated Access Levels to A GitLab project @@ -150,15 +154,15 @@ EXAMPLES = r''' - name: user2 access_level: maintainer state: absent -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, gitlab_authentication, gitlab, ensure_gitlab_package + auth_argument_spec, gitlab_authentication, gitlab ) @@ -236,16 +240,16 @@ def main(): project=dict(type='str', required=True), gitlab_user=dict(type='list', elements='str'), state=dict(type='str', default='present', choices=['present', 'absent']), - access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer']), + access_level=dict(type='str', choices=['guest', 'reporter', 'developer', 'maintainer', 'owner']), purge_users=dict(type='list', elements='str', choices=[ - 'guest', 'reporter', 'developer', 'maintainer']), + 'guest', 'reporter', 'developer', 'maintainer', 'owner']), gitlab_users_access=dict( type='list', elements='dict', options=dict( name=dict(type='str', required=True), 
access_level=dict(type='str', choices=[ - 'guest', 'reporter', 'developer', 'maintainer'], required=True), + 'guest', 'reporter', 'developer', 'maintainer', 'owner'], required=True), ) ), )) @@ -274,13 +278,16 @@ def main(): ], supports_check_mode=True, ) - ensure_gitlab_package(module) + + # check prerequisites and connect to gitlab server + gl = gitlab_authentication(module) access_level_int = { - 'guest': gitlab.GUEST_ACCESS, - 'reporter': gitlab.REPORTER_ACCESS, - 'developer': gitlab.DEVELOPER_ACCESS, - 'maintainer': gitlab.MAINTAINER_ACCESS, + 'guest': gitlab.const.GUEST_ACCESS, + 'reporter': gitlab.const.REPORTER_ACCESS, + 'developer': gitlab.const.DEVELOPER_ACCESS, + 'maintainer': gitlab.const.MAINTAINER_ACCESS, + 'owner': gitlab.const.OWNER_ACCESS, } gitlab_project = module.params['project'] @@ -291,9 +298,6 @@ def main(): if purge_users: purge_users = [access_level_int[level] for level in purge_users] - # connect to gitlab server - gl = gitlab_authentication(module) - project = GitLabProjectMembers(module, gl) gitlab_project_id = project.get_project(gitlab_project) diff --git a/plugins/modules/gitlab_project_variable.py b/plugins/modules/gitlab_project_variable.py index 986847c07b..cf8dd47524 100644 --- a/plugins/modules/gitlab_project_variable.py +++ b/plugins/modules/gitlab_project_variable.py @@ -1,28 +1,33 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2019, Markus Bergholz (markuman@gmail.com) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: gitlab_project_variable short_description: Creates/updates/deletes GitLab Projects Variables description: - - When a project variable does not exist, it will be created. 
- - When a project variable does exist, its value will be updated when the values are different. - - Variables which are untouched in the playbook, but are not untouched in the GitLab project, - they stay untouched (I(purge) is C(false)) or will be deleted (I(purge) is C(true)). + - When a project variable does not exist, it is created. + - When a project variable does exist and is not hidden, its value is updated when the values are different. + When a project variable does exist and is hidden, its value is updated. In this case, the module is B(not idempotent). + - Variables which are untouched in the playbook, but are not untouched in the GitLab project, they stay untouched (O(purge=false)) + or are deleted (O(purge=true)). author: - "Markus Bergholz (@markuman)" requirements: - - python >= 2.7 - python-gitlab python module extends_documentation_fragment: - community.general.auth_basic - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: state: @@ -39,30 +44,32 @@ options: type: str purge: description: - - When set to true, all variables which are not untouched in the task will be deleted. + - When set to V(true), all variables which are not untouched in the task are deleted. default: false type: bool vars: description: - - When the list element is a simple key-value pair, masked and protected will be set to false. - - When the list element is a dict with the keys I(value), I(masked) and I(protected), the user can - have full control about whether a value should be masked, protected or both. + - When the list element is a simple key-value pair, C(masked), C(hidden), C(raw), and C(protected) are set to V(false). + - When the list element is a dict with the keys C(value), C(masked), C(hidden), C(raw), and C(protected), the user can have full + control about whether a value should be masked, hidden, raw, protected, or a combination. 
- Support for protected values requires GitLab >= 9.3. - Support for masked values requires GitLab >= 11.10. + - Support for hidden values requires GitLab >= 17.4, and was added in community.general 11.3.0. + - Support for raw values requires GitLab >= 15.7. - Support for environment_scope requires GitLab Premium >= 13.11. - Support for variable_type requires GitLab >= 11.11. - - A I(value) must be a string or a number. - - Field I(variable_type) must be a string with either C(env_var), which is the default, or C(file). - - Field I(environment_scope) must be a string defined by scope environment. - - When a value is masked, it must be in Base64 and have a length of at least 8 characters. - See GitLab documentation on acceptable values for a masked variable (https://docs.gitlab.com/ce/ci/variables/#masked-variables). + - A C(value) must be a string or a number. + - Field C(variable_type) must be a string with either V(env_var), which is the default, or V(file). + - Field C(environment_scope) must be a string defined by scope environment. + - When a value is masked, it must be in Base64 and have a length of at least 8 characters. See GitLab documentation + on acceptable values for a masked variable (https://docs.gitlab.com/ce/ci/variables/#masked-variables). default: {} type: dict variables: version_added: 4.4.0 description: - A list of dictionaries that represents CI/CD variables. - - This module works internal with this structure, even if the older I(vars) parameter is used. + - This module works internal with this structure, even if the older O(vars) parameter is used. default: [] type: list elements: dict @@ -75,37 +82,58 @@ options: value: description: - The variable value. - - Required when I(state=present). + - Required when O(state=present). type: str + description: + description: + - A description for the variable. + - Support for descriptions requires GitLab >= 16.2. 
+ type: str + version_added: '11.4.0' masked: description: - - Wether variable value is masked or not. + - Whether variable value is masked or not. - Support for masked values requires GitLab >= 11.10. type: bool default: false + hidden: + description: + - Whether variable value is hidden or not. + - Implies C(masked). + - Support for hidden values requires GitLab >= 17.4. + type: bool + default: false + version_added: '11.3.0' protected: description: - - Wether variable value is protected or not. + - Whether variable value is protected or not. - Support for protected values requires GitLab >= 9.3. type: bool default: false + raw: + description: + - Whether variable value is raw or not. + - Support for raw values requires GitLab >= 15.7. + type: bool + default: false + version_added: '7.4.0' variable_type: description: - - Wether a variable is an environment variable (C(env_var)) or a file (C(file)). - - Support for I(variable_type) requires GitLab >= 11.11. + - Whether a variable is an environment variable (V(env_var)) or a file (V(file)). + - Support for O(variables[].variable_type) requires GitLab >= 11.11. type: str choices: ["env_var", "file"] default: env_var environment_scope: description: - The scope for the variable. - - Support for I(environment_scope) requires GitLab Premium >= 13.11. + - Support for O(variables[].environment_scope) requires GitLab Premium >= 13.11. 
type: str default: '*' -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Set or update some CI/CD variables community.general.gitlab_project_variable: api_url: https://gitlab.com @@ -136,6 +164,38 @@ EXAMPLES = ''' variable_type: env_var environment_scope: '*' +- name: Set or update some CI/CD variables with raw value + community.general.gitlab_project_variable: + api_url: https://gitlab.com + api_token: secret_access_token + project: markuman/dotfiles + purge: false + vars: + ACCESS_KEY_ID: abc123 + SECRET_ACCESS_KEY: + value: 3214cbad + masked: true + protected: true + raw: true + variable_type: env_var + environment_scope: '*' + +- name: Set or update some CI/CD variables with expandable value + community.general.gitlab_project_variable: + api_url: https://gitlab.com + api_token: secret_access_token + project: markuman/dotfiles + purge: false + vars: + ACCESS_KEY_ID: abc123 + SECRET_ACCESS_KEY: + value: '$MY_OTHER_VARIABLE' + masked: true + protected: true + raw: false + variable_type: env_var + environment_scope: '*' + - name: Delete one variable community.general.gitlab_project_variable: api_url: https://gitlab.com @@ -144,9 +204,9 @@ EXAMPLES = ''' state: absent vars: ACCESS_KEY_ID: abc123 -''' +""" -RETURN = ''' +RETURN = r""" project_variable: description: Four lists of the variablenames which were added, updated, removed or exist. returned: always @@ -156,80 +216,34 @@ project_variable: description: A list of variables which were created. returned: always type: list - sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY'] + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] untouched: description: A list of variables which exist. returned: always type: list - sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY'] + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] removed: description: A list of variables which were deleted. 
returned: always type: list - sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY'] + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] updated: description: A list of variables whose values were changed. returned: always type: list - sample: ['ACCESS_KEY_ID', 'SECRET_ACCESS_KEY'] -''' + sample: ["ACCESS_KEY_ID", "SECRET_ACCESS_KEY"] +""" -import traceback -from ansible.module_utils.basic import AnsibleModule, missing_required_lib +from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.api import basic_auth_argument_spec -from ansible.module_utils.six import string_types -from ansible.module_utils.six import integer_types -GITLAB_IMP_ERR = None -try: - import gitlab - HAS_GITLAB_PACKAGE = True -except Exception: - GITLAB_IMP_ERR = traceback.format_exc() - HAS_GITLAB_PACKAGE = False from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, gitlab_authentication, ensure_gitlab_package, filter_returned_variables + auth_argument_spec, gitlab_authentication, filter_returned_variables, vars_to_variables, + list_all_kwargs ) -def vars_to_variables(vars, module): - # transform old vars to new variables structure - variables = list() - for item, value in vars.items(): - if (isinstance(value, string_types) or - isinstance(value, (integer_types, float))): - variables.append( - { - "name": item, - "value": str(value), - "masked": False, - "protected": False, - "variable_type": "env_var", - } - ) - - elif isinstance(value, dict): - - new_item = { - "name": item, - "value": value.get('value'), - "masked": value.get('masked'), - "protected": value.get('protected'), - "variable_type": value.get('variable_type'), - } - - if value.get('environment_scope'): - new_item['environment_scope'] = value.get('environment_scope') - - variables.append(new_item) - - else: - module.fail_json(msg="value must be of type string, integer, float or dict") - - return variables - - class GitlabProjectVariables(object): def __init__(self, module, 
gitlab_instance): @@ -241,14 +255,7 @@ class GitlabProjectVariables(object): return self.repo.projects.get(project_name) def list_all_project_variables(self): - page_nb = 1 - variables = [] - vars_page = self.project.variables.list(page=page_nb) - while len(vars_page) > 0: - variables += vars_page - page_nb += 1 - vars_page = self.project.variables.list(page=page_nb) - return variables + return list(self.project.variables.list(**list_all_kwargs)) def create_variable(self, var_obj): if self._module.check_mode: @@ -257,8 +264,11 @@ class GitlabProjectVariables(object): var = { "key": var_obj.get('key'), "value": var_obj.get('value'), + "description": var_obj.get('description'), "masked": var_obj.get('masked'), + "masked_and_hidden": var_obj.get('hidden'), "protected": var_obj.get('protected'), + "raw": var_obj.get('raw'), "variable_type": var_obj.get('variable_type'), } @@ -315,7 +325,7 @@ def compare(requested_variables, existing_variables, state): def native_python_main(this_gitlab, purge, requested_variables, state, module): change = False - return_value = dict(added=list(), updated=list(), removed=list(), untouched=list()) + return_value = dict(added=[], updated=[], removed=[], untouched=[]) gitlab_keys = this_gitlab.list_all_project_variables() before = [x.attributes for x in gitlab_keys] @@ -329,8 +339,12 @@ def native_python_main(this_gitlab, purge, requested_variables, state, module): item['value'] = str(item.get('value')) if item.get('protected') is None: item['protected'] = False + if item.get('raw') is None: + item['raw'] = False if item.get('masked') is None: item['masked'] = False + if item.get('hidden') is None: + item['hidden'] = False if item.get('environment_scope') is None: item['environment_scope'] = '*' if item.get('variable_type') is None: @@ -361,14 +375,13 @@ def native_python_main(this_gitlab, purge, requested_variables, state, module): return_value['removed'].append(item) elif state == 'absent': - # value does not matter on removing 
variables. - # key and environment scope are sufficient - for item in existing_variables: - item.pop('value') - item.pop('variable_type') - for item in requested_variables: - item.pop('value') - item.pop('variable_type') + # value, type, and description do not matter on removing variables. + keys_ignored_on_deletion = ['value', 'variable_type', 'description'] + for key in keys_ignored_on_deletion: + for item in existing_variables: + item.pop(key) + for item in requested_variables: + item.pop(key) if not purge: remove_requested = [x for x in requested_variables if x in existing_variables] @@ -384,7 +397,7 @@ def native_python_main(this_gitlab, purge, requested_variables, state, module): if module.check_mode: return_value = dict(added=added, updated=updated, removed=return_value['removed'], untouched=untouched) - if return_value['added'] or return_value['removed'] or return_value['updated']: + if any(return_value[x] for x in ['added', 'removed', 'updated']): change = True gitlab_keys = this_gitlab.list_all_project_variables() @@ -398,13 +411,18 @@ def main(): argument_spec.update(auth_argument_spec()) argument_spec.update( project=dict(type='str', required=True), - purge=dict(type='bool', required=False, default=False), - vars=dict(type='dict', required=False, default=dict(), no_log=True), - variables=dict(type='list', elements='dict', required=False, default=list(), options=dict( + purge=dict(type='bool', default=False), + vars=dict(type='dict', default=dict(), no_log=True), + # please mind whenever changing the variables dict to also change module_utils/gitlab.py's + # KNOWN dict in filter_returned_variables or bad evil will happen + variables=dict(type='list', elements='dict', default=list(), options=dict( name=dict(type='str', required=True), value=dict(type='str', no_log=True), + description=dict(type='str'), masked=dict(type='bool', default=False), + hidden=dict(type='bool', default=False), protected=dict(type='bool', default=False), + raw=dict(type='bool', 
default=False), environment_scope=dict(type='str', default='*'), variable_type=dict(type='str', default='env_var', choices=["env_var", "file"]), )), @@ -429,10 +447,9 @@ def main(): ], supports_check_mode=True ) - ensure_gitlab_package(module) - if not HAS_GITLAB_PACKAGE: - module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR) + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) purge = module.params['purge'] var_list = module.params['vars'] @@ -445,9 +462,7 @@ def main(): if state == 'present': if any(x['value'] is None for x in variables): - module.fail_json(msg='value parameter is required in state present') - - gitlab_instance = gitlab_authentication(module) + module.fail_json(msg='value parameter is required for all variables in state present') this_gitlab = GitlabProjectVariables(module=module, gitlab_instance=gitlab_instance) diff --git a/plugins/modules/gitlab_protected_branch.py b/plugins/modules/gitlab_protected_branch.py index 335e1445a2..c779736cc6 100644 --- a/plugins/modules/gitlab_protected_branch.py +++ b/plugins/modules/gitlab_protected_branch.py @@ -1,13 +1,11 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2021, Werner Dijkerman (ikben@werner-dijkerman.nl) # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: gitlab_protected_branch short_description: Manage protection of existing branches version_added: 3.4.0 @@ -16,11 +14,17 @@ description: author: - "Werner Dijkerman (@dj-wasabi)" requirements: - - python >= 2.7 - python-gitlab >= 2.3.0 extends_documentation_fragment: - community.general.auth_basic - community.general.gitlab + - community.general.attributes + +attributes: + 
check_mode: + support: full + diff_mode: + support: none options: state: @@ -37,7 +41,7 @@ options: name: description: - The name of the branch that needs to be protected. - - Can make use a wildcard character for like C(production/*) or just have C(main) or C(develop) as value. + - Can make use a wildcard character for like V(production/*) or just have V(main) or V(develop) as value. required: true type: str merge_access_levels: @@ -52,10 +56,20 @@ options: default: maintainer type: str choices: ["maintainer", "developer", "nobody"] -''' + allow_force_push: + description: + - Whether or not to allow force pushes to the protected branch. + type: bool + version_added: '11.3.0' + code_owner_approval_required: + description: + - Whether or not to require code owner approval to push. + type: bool + version_added: '11.3.0' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create protected branch on main community.general.gitlab_protected_branch: api_url: https://gitlab.com @@ -64,11 +78,10 @@ EXAMPLES = ''' name: main merge_access_levels: maintainer push_access_level: nobody +""" -''' - -RETURN = ''' -''' +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.api import basic_auth_argument_spec @@ -76,7 +89,7 @@ from ansible.module_utils.api import basic_auth_argument_spec from ansible_collections.community.general.plugins.module_utils.version import LooseVersion from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, gitlab_authentication, gitlab, ensure_gitlab_package + auth_argument_spec, gitlab_authentication, gitlab ) @@ -87,9 +100,9 @@ class GitlabProtectedBranch(object): self._module = module self.project = self.get_project(project) self.ACCESS_LEVEL = { - 'nobody': gitlab.NO_ACCESS, - 'developer': gitlab.DEVELOPER_ACCESS, - 'maintainer': gitlab.MAINTAINER_ACCESS + 'nobody': gitlab.const.NO_ACCESS, + 'developer': gitlab.const.DEVELOPER_ACCESS, + 'maintainer': 
gitlab.const.MAINTAINER_ACCESS } def get_project(self, project_name): @@ -101,27 +114,43 @@ class GitlabProtectedBranch(object): except Exception as e: return False - def create_protected_branch(self, name, merge_access_levels, push_access_level): - if self._module.check_mode: - return True - merge = self.ACCESS_LEVEL[merge_access_levels] - push = self.ACCESS_LEVEL[push_access_level] - self.project.protectedbranches.create({ + def create_or_update_protected_branch(self, name, options): + protected_branch_options = { 'name': name, - 'merge_access_level': merge, - 'push_access_level': push - }) + 'allow_force_push': options['allow_force_push'], + 'code_owner_approval_required': options['code_owner_approval_required'], + } + protected_branch = self.protected_branch_exist(name=name) + changed = False + if protected_branch and self.can_update(protected_branch, options): + for arg_key, arg_value in protected_branch_options.items(): + if arg_value is not None: + if getattr(protected_branch, arg_key) != arg_value: + setattr(protected_branch, arg_key, arg_value) + changed = True + if changed and not self._module.check_mode: + protected_branch.save() + else: + # Set immutable options only on (re)creation + protected_branch_options['merge_access_level'] = options['merge_access_levels'] + protected_branch_options['push_access_level'] = options['push_access_level'] + if protected_branch: + # Exists, but couldn't update. 
So, delete first + self.delete_protected_branch(name) + if not self._module.check_mode: + self.project.protectedbranches.create(protected_branch_options) + changed = True - def compare_protected_branch(self, name, merge_access_levels, push_access_level): - configured_merge = self.ACCESS_LEVEL[merge_access_levels] - configured_push = self.ACCESS_LEVEL[push_access_level] - current = self.protected_branch_exist(name=name) - current_merge = current.merge_access_levels[0]['access_level'] - current_push = current.push_access_levels[0]['access_level'] - if current: - if current.name == name and current_merge == configured_merge and current_push == configured_push: - return True - return False + return changed + + def can_update(self, protected_branch, options): + # these keys are not set on update the same way they are on creation + configured_merge = options['merge_access_levels'] + configured_push = options['push_access_level'] + current_merge = protected_branch.merge_access_levels[0]['access_level'] + current_push = protected_branch.push_access_levels[0]['access_level'] + return ((configured_merge is None or current_merge == configured_merge) and + (configured_push is None or current_push == configured_push)) def delete_protected_branch(self, name): if self._module.check_mode: @@ -137,6 +166,8 @@ def main(): name=dict(type='str', required=True), merge_access_levels=dict(type='str', default="maintainer", choices=["maintainer", "developer", "nobody"]), push_access_level=dict(type='str', default="maintainer", choices=["maintainer", "developer", "nobody"]), + allow_force_push=dict(type='bool'), + code_owner_approval_required=dict(type='bool'), state=dict(type='str', default="present", choices=["absent", "present"]), ) @@ -157,7 +188,9 @@ def main(): ], supports_check_mode=True ) - ensure_gitlab_package(module) + + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) project = module.params['project'] name = 
module.params['name'] @@ -167,24 +200,24 @@ def main(): gitlab_version = gitlab.__version__ if LooseVersion(gitlab_version) < LooseVersion('2.3.0'): - module.fail_json(msg="community.general.gitlab_proteched_branch requires python-gitlab Python module >= 2.3.0 (installed version: [%s])." + module.fail_json(msg="community.general.gitlab_protected_branch requires python-gitlab Python module >= 2.3.0 (installed version: [%s])." " Please upgrade python-gitlab to version 2.3.0 or above." % gitlab_version) - gitlab_instance = gitlab_authentication(module) this_gitlab = GitlabProtectedBranch(module=module, project=project, gitlab_instance=gitlab_instance) p_branch = this_gitlab.protected_branch_exist(name=name) - if not p_branch and state == "present": - this_gitlab.create_protected_branch(name=name, merge_access_levels=merge_access_levels, push_access_level=push_access_level) - module.exit_json(changed=True, msg="Created the proteched branch.") - elif p_branch and state == "present": - if not this_gitlab.compare_protected_branch(name, merge_access_levels, push_access_level): - this_gitlab.delete_protected_branch(name=name) - this_gitlab.create_protected_branch(name=name, merge_access_levels=merge_access_levels, push_access_level=push_access_level) - module.exit_json(changed=True, msg="Recreated the proteched branch.") + options = { + "merge_access_levels": this_gitlab.ACCESS_LEVEL[merge_access_levels], + "push_access_level": this_gitlab.ACCESS_LEVEL[push_access_level], + "allow_force_push": module.params["allow_force_push"], + "code_owner_approval_required": module.params["code_owner_approval_required"], + } + if state == "present": + changed = this_gitlab.create_or_update_protected_branch(name, options) + module.exit_json(changed=changed, msg="Created or updated the protected branch.") elif p_branch and state == "absent": this_gitlab.delete_protected_branch(name=name) - module.exit_json(changed=True, msg="Deleted the proteched branch.") + module.exit_json(changed=True, 
msg="Deleted the protected branch.") module.exit_json(changed=False, msg="No changes are needed.") diff --git a/plugins/modules/gitlab_runner.py b/plugins/modules/gitlab_runner.py index 9d7c900cc5..889e2471cc 100644 --- a/plugins/modules/gitlab_runner.py +++ b/plugins/modules/gitlab_runner.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2021, Raphaël Droz (raphael.droz@gmail.com) # Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) @@ -7,40 +6,58 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: gitlab_runner short_description: Create, modify and delete GitLab Runners description: - - Register, update and delete runners with the GitLab API. + - Register, update and delete runners on GitLab Server side with the GitLab API. - All operations are performed using the GitLab API v4. - - For details, consult the full API documentation at U(https://docs.gitlab.com/ee/api/runners.html). - - A valid private API token is required for all operations. You can create as many tokens as you like using the GitLab web interface at - U(https://$GITLAB_URL/profile/personal_access_tokens). - - A valid registration token is required for registering a new runner. - To create shared runners, you need to ask your administrator to give you this token. - It can be found at U(https://$GITLAB_URL/admin/runners/). + - For details, consult the full API documentation at U(https://docs.gitlab.com/ee/api/runners.html) and + U(https://docs.gitlab.com/ee/api/users.html#create-a-runner-linked-to-a-user). + - A valid private API token is required for all operations. 
You can create as many tokens as you like using the GitLab web + interface at U(https://$GITLAB_URL/profile/personal_access_tokens). + - A valid registration token is required for registering a new runner. To create shared runners, you need to ask your administrator + to give you this token. It can be found at U(https://$GITLAB_URL/admin/runners/). + - This module does not handle the C(gitlab-runner) process part, but only manages the runner on GitLab Server side through + its API. Once the module has created the runner, you may use the generated token to run C(gitlab-runner register) command. notes: - - To create a new runner at least the C(api_token), C(description) and C(api_url) options are required. - - Runners need to have unique descriptions. + - To create a new runner at least the O(api_token), O(description) and O(api_url) options are required. + - Runners need to have unique descriptions, since this attribute is used as key for idempotency. author: - Samy Coenen (@SamyCoenen) - Guillaume Martinez (@Lunik) requirements: - - python >= 2.7 - - python-gitlab >= 1.5.0 + - python-gitlab >= 1.5.0 for legacy runner registration workflow (runner registration token - + U(https://docs.gitlab.com/runner/register/#register-with-a-runner-registration-token-deprecated)) + - python-gitlab >= 4.0.0 for new runner registration workflow (runner authentication token - + U(https://docs.gitlab.com/runner/register/#register-with-a-runner-authentication-token)) extends_documentation_fragment: - community.general.auth_basic - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: + group: + description: + - ID or full path of the group in the form group/subgroup. + - Mutually exclusive with O(owned) and O(project). + - Must be group's numeric ID if O(registration_token) is not set and O(state=present). 
+ type: str + version_added: '6.5.0' project: description: - ID or full path of the project in the form of group/name. - - Mutually exclusive with I(owned) since community.general 4.5.0. + - Mutually exclusive with O(owned) since community.general 4.5.0. + - Mutually exclusive with O(group). + - Must be project's numeric ID if O(registration_token) is not set and O(state=present). type: str version_added: '3.7.0' description: @@ -52,29 +69,43 @@ options: - name state: description: - - Make sure that the runner with the same name exists with the same configuration or delete the runner with the same name. + - Make sure that the runner with the same name exists with the same configuration or delete the runner with the same + name. required: false default: present choices: ["present", "absent"] type: str registration_token: description: - - The registration token is used to register new runners. - - Required if I(state) is C(present). + - The registration token is used to register new runners before GitLab 16.0. + - Required if O(state=present) for GitLab < 16.0. + - If set, the runner is created using the old runner creation workflow. + - If not set, the runner is created using the new runner creation workflow, introduced in GitLab 16.0. + - If not set, requires python-gitlab >= 4.0.0. type: str owned: description: - Searches only runners available to the user when searching for existing, when false admin token required. - - Mutually exclusive with I(project) since community.general 4.5.0. + - Mutually exclusive with O(project) since community.general 4.5.0. + - Mutually exclusive with O(group). default: false type: bool version_added: 2.0.0 active: description: - Define if the runners is immediately active after creation. + - Mutually exclusive with O(paused). required: false default: true type: bool + paused: + description: + - Define if the runners is active or paused after creation. + - Mutually exclusive with O(active). 
+ required: false + default: false + type: bool + version_added: 8.1.0 locked: description: - Determines if the runner is locked or not. @@ -84,21 +115,24 @@ options: access_level: description: - Determines if a runner can pick up jobs only from protected branches. - - If I(access_level_on_creation) is not explicitly set to C(true), this option is ignored on registration and - is only applied on updates. - - If set to C(ref_protected), runner can pick up jobs only from protected branches. - - If set to C(not_protected), runner can pick up jobs from both protected and unprotected branches. + - If O(access_level_on_creation) is not explicitly set to V(true), this option is ignored on registration and is only + applied on updates. + - If set to V(not_protected), runner can pick up jobs from both protected and unprotected branches. + - If set to V(ref_protected), runner can pick up jobs only from protected branches. + - Before community.general 8.0.0 the default was V(ref_protected). This was changed to no default in community.general + 8.0.0. If this option is not specified explicitly, GitLab uses V(not_protected) on creation, and the value set is + not changed on any updates. required: false - default: ref_protected - choices: ["ref_protected", "not_protected"] + choices: ["not_protected", "ref_protected"] type: str access_level_on_creation: description: - Whether the runner should be registered with an access level or not. - - If set to C(true), the value of I(access_level) is used for runner registration. - - If set to C(false), GitLab registers the runner with the default access level. - - The current default of this option is C(false). This default is deprecated and will change to C(true) in commuinty.general 7.0.0. + - If set to V(true), the value of O(access_level) is used for runner registration. + - If set to V(false), GitLab registers the runner with the default access level. + - The default of this option changed to V(true) in community.general 7.0.0. 
Before, it was V(false). required: false + default: true type: bool version_added: 6.3.0 maximum_timeout: @@ -119,10 +153,48 @@ options: default: [] type: list elements: str -''' +""" -EXAMPLES = ''' -- name: "Register runner" +EXAMPLES = r""" +- name: Create an instance-level runner + community.general.gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + description: Docker Machine t1 + state: present + active: true + tag_list: ['docker'] + run_untagged: false + locked: false + register: runner # Register module output to run C(gitlab-runner register) command in another task + +- name: Create a group-level runner + community.general.gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + description: Docker Machine t1 + state: present + active: true + tag_list: ['docker'] + run_untagged: false + locked: false + group: top-level-group/subgroup + register: runner # Register module output to run C(gitlab-runner register) command in another task + +- name: Create a project-level runner + community.general.gitlab_runner: + api_url: https://gitlab.example.com/ + api_token: "{{ access_token }}" + description: Docker Machine t1 + state: present + active: true + tag_list: ['docker'] + run_untagged: false + locked: false + project: top-level-group/subgroup/project + register: runner # Register module output to run C(gitlab-runner register) command in another task + +- name: "Register instance-level runner with registration token (deprecated)" community.general.gitlab_runner: api_url: https://gitlab.example.com/ api_token: "{{ access_token }}" @@ -133,6 +205,7 @@ EXAMPLES = ''' tag_list: ['docker'] run_untagged: false locked: false + register: runner # Register module output to run C(gitlab-runner register) command in another task - name: "Delete runner" community.general.gitlab_runner: @@ -149,7 +222,7 @@ EXAMPLES = ''' owned: true state: absent -- name: Register runner for a specific project +- name: 
"Register a project-level runner with registration token (deprecated)" community.general.gitlab_runner: api_url: https://gitlab.example.com/ api_token: "{{ access_token }}" @@ -157,118 +230,131 @@ EXAMPLES = ''' description: MyProject runner state: present project: mygroup/mysubgroup/myproject -''' + register: runner # Register module output to run C(gitlab-runner register) command in another task +""" -RETURN = ''' +RETURN = r""" msg: - description: Success or failure message + description: Success or failure message. returned: always type: str sample: "Success" result: - description: json parsed response from the server + description: JSON-parsed response from the server. returned: always type: dict error: - description: the error message returned by the GitLab API + description: The error message returned by the GitLab API. returned: failed type: str sample: "400: path is already in use" runner: - description: API object + description: API object. returned: always type: dict -''' +""" from ansible.module_utils.api import basic_auth_argument_spec from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, gitlab_authentication, gitlab, ensure_gitlab_package + auth_argument_spec, gitlab_authentication, gitlab, list_all_kwargs ) -try: - cmp # pylint: disable=used-before-assignment -except NameError: - def cmp(a, b): - return (a > b) - (a < b) +from ansible_collections.community.general.plugins.module_utils.version import LooseVersion class GitLabRunner(object): - def __init__(self, module, gitlab_instance, project=None): + def __init__(self, module, gitlab_instance, group=None, project=None): self._module = module self._gitlab = gitlab_instance + self.runner_object = None + # Whether to operate on GitLab-instance-wide or project-wide runners # See https://gitlab.com/gitlab-org/gitlab-ce/issues/60774 # for 
group runner token access if project: self._runners_endpoint = project.runners.list + elif group: + self._runners_endpoint = group.runners.list elif module.params['owned']: self._runners_endpoint = gitlab_instance.runners.list else: self._runners_endpoint = gitlab_instance.runners.all - self.runner_object = None - def create_or_update_runner(self, description, options): changed = False arguments = { - 'active': options['active'], 'locked': options['locked'], 'run_untagged': options['run_untagged'], 'maximum_timeout': options['maximum_timeout'], 'tag_list': options['tag_list'], } + + if options.get('paused') is not None: + arguments['paused'] = options['paused'] + else: + arguments['active'] = options['active'] + + if options.get('access_level') is not None: + arguments['access_level'] = options['access_level'] # Because we have already call userExists in main() if self.runner_object is None: arguments['description'] = description - arguments['token'] = options['registration_token'] + if options.get('registration_token') is not None: + arguments['token'] = options['registration_token'] + elif options.get('group') is not None: + arguments['runner_type'] = 'group_type' + arguments['group_id'] = options['group'] + elif options.get('project') is not None: + arguments['runner_type'] = 'project_type' + arguments['project_id'] = options['project'] + else: + arguments['runner_type'] = 'instance_type' access_level_on_creation = self._module.params['access_level_on_creation'] - if access_level_on_creation is None: - message = "The option 'access_level_on_creation' is unspecified, so 'false' is assumed. "\ - "That means any value of 'access_level' is ignored and GitLab registers the runner with its default value. 
"\ - "The option 'access_level_on_creation' will switch to 'true' in community.general 7.0.0" - self._module.deprecate(message, version='7.0.0', collection_name='community.general') - access_level_on_creation = False - - if access_level_on_creation: - arguments['access_level'] = options['access_level'] + if not access_level_on_creation: + arguments.pop('access_level', None) runner = self.create_runner(arguments) changed = True else: - arguments['access_level'] = options['access_level'] changed, runner = self.update_runner(self.runner_object, arguments) + if changed: + if self._module.check_mode: + self._module.exit_json(changed=True, msg="Successfully updated the runner %s" % description) + + try: + runner.save() + except Exception as e: + self._module.fail_json(msg="Failed to update runner: %s " % to_native(e)) self.runner_object = runner - if changed: - if self._module.check_mode: - self._module.exit_json(changed=True, msg="Successfully created or updated the runner %s" % description) - - try: - runner.save() - except Exception as e: - self._module.fail_json(msg="Failed to update runner: %s " % to_native(e)) - return True - else: - return False + return changed ''' @param arguments Attributes of the runner ''' def create_runner(self, arguments): if self._module.check_mode: - return True + class MockRunner: + def __init__(self): + self._attrs = {} + return MockRunner() try: - runner = self._gitlab.runners.create(arguments) + if arguments.get('token') is not None: + runner = self._gitlab.runners.create(arguments) + elif LooseVersion(gitlab.__version__) < LooseVersion('4.0.0'): + self._module.fail_json(msg="New runner creation workflow requires python-gitlab 4.0.0 or higher") + else: + runner = self._gitlab.user.runners.create(arguments) except (gitlab.exceptions.GitlabCreateError) as e: self._module.fail_json(msg="Failed to create runner: %s " % to_native(e)) @@ -282,18 +368,18 @@ class GitLabRunner(object): changed = False for arg_key, arg_value in 
arguments.items(): - if arguments[arg_key] is not None: - if isinstance(arguments[arg_key], list): + if arg_value is not None: + if isinstance(arg_value, list): list1 = getattr(runner, arg_key) list1.sort() - list2 = arguments[arg_key] + list2 = arg_value list2.sort() - if cmp(list1, list2): - setattr(runner, arg_key, arguments[arg_key]) + if list1 != list2: + setattr(runner, arg_key, arg_value) changed = True else: - if getattr(runner, arg_key) != arguments[arg_key]: - setattr(runner, arg_key, arguments[arg_key]) + if getattr(runner, arg_key) != arg_value: + setattr(runner, arg_key, arg_value) changed = True return (changed, runner) @@ -302,16 +388,16 @@ class GitLabRunner(object): @param description Description of the runner ''' def find_runner(self, description): - runners = self._runners_endpoint(as_list=False) + runners = self._runners_endpoint(**list_all_kwargs) for runner in runners: # python-gitlab 2.2 through at least 2.5 returns a list of dicts for list() instead of a Runner # object, so we need to handle both if hasattr(runner, "description"): - if (runner.description == description): + if runner.description == description: return self._gitlab.runners.get(runner.id) else: - if (runner['description'] == description): + if runner['description'] == description: return self._gitlab.runners.get(runner['id']) ''' @@ -341,15 +427,17 @@ def main(): argument_spec.update(dict( description=dict(type='str', required=True, aliases=["name"]), active=dict(type='bool', default=True), + paused=dict(type='bool', default=False), owned=dict(type='bool', default=False), tag_list=dict(type='list', elements='str', default=[]), run_untagged=dict(type='bool', default=True), locked=dict(type='bool', default=False), - access_level=dict(type='str', default='ref_protected', choices=["not_protected", "ref_protected"]), - access_level_on_creation=dict(type='bool'), + access_level=dict(type='str', choices=["not_protected", "ref_protected"]), + access_level_on_creation=dict(type='bool', 
default=True), maximum_timeout=dict(type='int', default=3600), registration_token=dict(type='str', no_log=True), project=dict(type='str'), + group=dict(type='str'), state=dict(type='str', default="present", choices=["absent", "present"]), )) @@ -362,6 +450,9 @@ def main(): ['api_token', 'api_oauth_token'], ['api_token', 'api_job_token'], ['project', 'owned'], + ['group', 'owned'], + ['project', 'group'], + ['active', 'paused'], ], required_together=[ ['api_username', 'api_password'], @@ -369,16 +460,16 @@ def main(): required_one_of=[ ['api_username', 'api_token', 'api_oauth_token', 'api_job_token'], ], - required_if=[ - ('state', 'present', ['registration_token']), - ], supports_check_mode=True, ) - ensure_gitlab_package(module) + + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) state = module.params['state'] runner_description = module.params['description'] runner_active = module.params['active'] + runner_paused = module.params['paused'] tag_list = module.params['tag_list'] run_untagged = module.params['run_untagged'] runner_locked = module.params['locked'] @@ -386,16 +477,23 @@ def main(): maximum_timeout = module.params['maximum_timeout'] registration_token = module.params['registration_token'] project = module.params['project'] + group = module.params['group'] - gitlab_instance = gitlab_authentication(module) gitlab_project = None + gitlab_group = None + if project: try: gitlab_project = gitlab_instance.projects.get(project) except gitlab.exceptions.GitlabGetError as e: module.fail_json(msg='No such a project %s' % project, exception=to_native(e)) + elif group: + try: + gitlab_group = gitlab_instance.groups.get(group) + except gitlab.exceptions.GitlabGetError as e: + module.fail_json(msg='No such a group %s' % group, exception=to_native(e)) - gitlab_runner = GitLabRunner(module, gitlab_instance, gitlab_project) + gitlab_runner = GitLabRunner(module, gitlab_instance, gitlab_group, gitlab_project) 
runner_exists = gitlab_runner.exists_runner(runner_description) if state == 'absent': @@ -406,7 +504,7 @@ def main(): module.exit_json(changed=False, msg="Runner deleted or does not exists") if state == 'present': - if gitlab_runner.create_or_update_runner(runner_description, { + runner_values = { "active": runner_active, "tag_list": tag_list, "run_untagged": run_untagged, @@ -414,7 +512,13 @@ def main(): "access_level": access_level, "maximum_timeout": maximum_timeout, "registration_token": registration_token, - }): + "group": group, + "project": project, + } + if LooseVersion(gitlab_runner._gitlab.version()[0]) >= LooseVersion("14.8.0"): + # the paused attribute for runners is available since 14.8 + runner_values["paused"] = runner_paused + if gitlab_runner.create_or_update_runner(runner_description, runner_values): module.exit_json(changed=True, runner=gitlab_runner.runner_object._attrs, msg="Successfully created or updated the runner %s" % runner_description) else: diff --git a/plugins/modules/gitlab_user.py b/plugins/modules/gitlab_user.py index 4824f7301f..58bfc126ac 100644 --- a/plugins/modules/gitlab_user.py +++ b/plugins/modules/gitlab_user.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2021, Lennert Mertens (lennert@nubera.be) # Copyright (c) 2019, Guillaume Martinez (lunik@tiwabbit.fr) @@ -7,18 +6,16 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: gitlab_user short_description: Creates/updates/deletes/blocks/unblocks GitLab Users description: - - When the user does not exist in GitLab, it will be created. - - When the user exists and state=absent, the user will be deleted. 
- - When the user exists and state=blocked, the user will be blocked. - - When changes are made to user, the user will be updated. + - When the user does not exist in GitLab, it is created. + - When the user exists and state=absent, the user is deleted. + - When the user exists and state=blocked, the user is blocked. + - When changes are made to user, the user is updated. notes: - From community.general 0.2.0 and onwards, name, email and password are optional while deleting the user. author: @@ -27,18 +24,24 @@ author: - Lennert Mertens (@LennertMertens) - Stef Graces (@stgrace) requirements: - - python >= 2.7 - python-gitlab python module - administrator rights on the GitLab server extends_documentation_fragment: - community.general.auth_basic - community.general.gitlab + - community.general.attributes + +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: description: - Name of the user you want to create. - - Required only if C(state) is set to C(present). + - Required only if O(state=present). type: str username: description: @@ -59,7 +62,7 @@ options: email: description: - The email that belongs to the user. - - Required only if C(state) is set to C(present). + - Required only if O(state=present). type: str sshkey_name: description: @@ -77,18 +80,13 @@ options: version_added: 3.1.0 group: description: - - Id or Full path of parent group in the form of group/name. + - ID or Full path of parent group in the form of group/name. - Add user as a member to this group. type: str access_level: description: - - The access level to the group. One of the following can be used. - - guest - - reporter - - developer - - master (alias for maintainer) - - maintainer - - owner + - The access level to the group. + - The value V(master) is an alias for V(maintainer). 
default: guest type: str choices: ["guest", "reporter", "developer", "master", "maintainer", "owner"] @@ -116,13 +114,13 @@ options: identities: description: - List of identities to be added/updated for this user. - - To remove all other identities from this user, set I(overwrite_identities=true). + - To remove all other identities from this user, set O(overwrite_identities=true). type: list elements: dict suboptions: provider: description: - - The name of the external identity provider + - The name of the external identity provider. type: str extern_uid: description: @@ -132,19 +130,18 @@ options: overwrite_identities: description: - Overwrite identities with identities added in this module. - - This means that all identities that the user has and that are not listed in I(identities) are removed from the user. - - This is only done if a list is provided for I(identities). To remove all identities, provide an empty list. + - This means that all identities that the user has and that are not listed in O(identities) are removed from the user. + - This is only done if a list is provided for O(identities). To remove all identities, provide an empty list. 
type: bool default: false version_added: 3.3.0 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: "Delete GitLab User" community.general.gitlab_user: api_url: https://gitlab.example.com/ api_token: "{{ access_token }}" - validate_certs: false username: myusername state: absent @@ -174,8 +171,8 @@ EXAMPLES = ''' password: mysecretpassword email: me@example.com identities: - - provider: Keycloak - extern_uid: f278f95c-12c7-4d51-996f-758cc2eb11bc + - provider: Keycloak + extern_uid: f278f95c-12c7-4d51-996f-758cc2eb11bc state: present group: super_group/mon_group access_level: owner @@ -184,7 +181,6 @@ EXAMPLES = ''' community.general.gitlab_user: api_url: https://gitlab.example.com/ api_token: "{{ access_token }}" - validate_certs: false username: myusername state: blocked @@ -192,34 +188,33 @@ EXAMPLES = ''' community.general.gitlab_user: api_url: https://gitlab.example.com/ api_token: "{{ access_token }}" - validate_certs: false username: myusername state: unblocked -''' +""" -RETURN = ''' +RETURN = r""" msg: - description: Success or failure message + description: Success or failure message. returned: always type: str sample: "Success" result: - description: json parsed response from the server + description: JSON-parsed response from the server. returned: always type: dict error: - description: the error message returned by the GitLab API + description: The error message returned by the GitLab API. returned: failed type: str sample: "400: path is already in use" user: - description: API object + description: API object. 
returned: always type: dict -''' +""" from ansible.module_utils.api import basic_auth_argument_spec @@ -227,7 +222,7 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.general.plugins.module_utils.gitlab import ( - auth_argument_spec, find_group, gitlab_authentication, gitlab, ensure_gitlab_package + auth_argument_spec, find_group, gitlab_authentication, gitlab, list_all_kwargs ) @@ -237,12 +232,12 @@ class GitLabUser(object): self._gitlab = gitlab_instance self.user_object = None self.ACCESS_LEVEL = { - 'guest': gitlab.GUEST_ACCESS, - 'reporter': gitlab.REPORTER_ACCESS, - 'developer': gitlab.DEVELOPER_ACCESS, - 'master': gitlab.MAINTAINER_ACCESS, - 'maintainer': gitlab.MAINTAINER_ACCESS, - 'owner': gitlab.OWNER_ACCESS, + 'guest': gitlab.const.GUEST_ACCESS, + 'reporter': gitlab.const.REPORTER_ACCESS, + 'developer': gitlab.const.DEVELOPER_ACCESS, + 'master': gitlab.const.MAINTAINER_ACCESS, + 'maintainer': gitlab.const.MAINTAINER_ACCESS, + 'owner': gitlab.const.OWNER_ACCESS, } ''' @@ -342,9 +337,10 @@ class GitLabUser(object): @param sshkey_name Name of the ssh key ''' def ssh_key_exists(self, user, sshkey_name): - keyList = map(lambda k: k.title, user.keys.list(all=True)) - - return sshkey_name in keyList + return any( + k.title == sshkey_name + for k in user.keys.list(**list_all_kwargs) + ) ''' @param user User object @@ -478,7 +474,7 @@ class GitLabUser(object): ''' @param user User object - @param identites List of identities to be added/updated + @param identities List of identities to be added/updated @param overwrite_identities Overwrite user identities with identities passed to this module ''' def add_identities(self, user, identities, overwrite_identities=False): @@ -497,7 +493,7 @@ class GitLabUser(object): ''' @param user User object - @param identites List of identities to be added/updated + @param identities List of identities to be added/updated ''' 
def delete_identities(self, user, identities): changed = False @@ -512,10 +508,13 @@ class GitLabUser(object): @param username Username of the user ''' def find_user(self, username): - users = self._gitlab.users.list(search=username, all=True) - for user in users: - if (user.username == username): - return user + return next( + ( + user for user in self._gitlab.users.list(search=username, **list_all_kwargs) + if user.username == username + ), + None + ) ''' @param username Username of the user @@ -609,7 +608,9 @@ def main(): ('state', 'present', ['name', 'email']), ) ) - ensure_gitlab_package(module) + + # check prerequisites and connect to gitlab server + gitlab_instance = gitlab_authentication(module) user_name = module.params['name'] state = module.params['state'] @@ -628,8 +629,6 @@ def main(): user_identities = module.params['identities'] overwrite_identities = module.params['overwrite_identities'] - gitlab_instance = gitlab_authentication(module) - gitlab_user = GitLabUser(module, gitlab_instance) user_exists = gitlab_user.exists_user(user_username) if user_exists: diff --git a/plugins/modules/grove.py b/plugins/modules/grove.py index d3ad6bb019..fc71322688 100644 --- a/plugins/modules/grove.py +++ b/plugins/modules/grove.py @@ -1,21 +1,24 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: grove short_description: Sends a notification to a grove.io channel description: - - The C(grove) module sends a message for a service to a Grove.io - channel. + - The C(grove) module sends a message for a service to a Grove.io channel. 
+extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: channel_token: type: str @@ -25,44 +28,45 @@ options: service: type: str description: - - Name of the service (displayed as the "user" in the message) + - Name of the service (displayed as the "user" in the message). required: false default: ansible message_content: type: str description: - Message content. - - The alias I(message) is deprecated and will be removed in community.general 4.0.0. + - The alias O(ignore:message) has been removed in community.general 4.0.0. required: true url: type: str description: - - Service URL for the web client + - Service URL for the web client. required: false icon_url: type: str description: - - Icon for the service + - Icon for the service. required: false validate_certs: description: - - If C(false), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. 
default: true type: bool author: "Jonas Pfenniger (@zimbatm)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Sends a notification to a grove.io channel community.general.grove: channel_token: 6Ph62VBBJOccmtTPZbubiPzdrhipZXtg service: my-app message: 'deployed {{ target }}' -''' +""" + +from urllib.parse import urlencode from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.urls import fetch_url @@ -96,8 +100,8 @@ def main(): channel_token=dict(type='str', required=True, no_log=True), message_content=dict(type='str', required=True), service=dict(type='str', default='ansible'), - url=dict(type='str', default=None), - icon_url=dict(type='str', default=None), + url=dict(type='str'), + icon_url=dict(type='str'), validate_certs=dict(default=True, type='bool'), ) ) diff --git a/plugins/modules/gunicorn.py b/plugins/modules/gunicorn.py index ff88cead7c..b7033d3471 100644 --- a/plugins/modules/gunicorn.py +++ b/plugins/modules/gunicorn.py @@ -1,27 +1,29 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017, Alejandro Gomez # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: gunicorn short_description: Run gunicorn with various settings description: - - Starts gunicorn with the parameters specified. Common settings for gunicorn - configuration are supported. For additional configuration use a config file - See U(https://gunicorn-docs.readthedocs.io/en/latest/settings.html) for more - options. It's recommended to always use the chdir option to avoid problems - with the location of the app. + - Starts gunicorn with the parameters specified. 
Common settings for gunicorn configuration are supported. For additional + configuration use a config file See U(https://gunicorn-docs.readthedocs.io/en/latest/settings.html) for more options. + It's recommended to always use the chdir option to avoid problems with the location of the app. requirements: [gunicorn] author: - - "Alejandro Gomez (@agmezr)" + - "Alejandro Gomez (@agmezr)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: app: type: str @@ -33,37 +35,36 @@ options: type: path aliases: ['virtualenv'] description: - - 'Path to the virtualenv directory.' + - Path to the virtualenv directory. config: type: path description: - - 'Path to the gunicorn configuration file.' + - Path to the gunicorn configuration file. aliases: ['conf'] chdir: type: path description: - - 'Chdir to specified directory before apps loading.' + - Chdir to specified directory before apps loading. pid: type: path description: - - 'A filename to use for the PID file. If not set and not found on the configuration file a tmp - pid file will be created to check a successful run of gunicorn.' + - A filename to use for the PID file. If not set and not found on the configuration file a tmp pid file is created to + check a successful run of gunicorn. worker: type: str choices: ['sync', 'eventlet', 'gevent', 'tornado ', 'gthread', 'gaiohttp'] description: - - 'The type of workers to use. The default class (sync) should handle most "normal" types of workloads.' + - The type of workers to use. The default class (sync) should handle most "normal" types of workloads. user: type: str description: - - 'Switch worker processes to run as this user.' + - Switch worker processes to run as this user. notes: - - If not specified on config file, a temporary error log will be created on /tmp dir. - Please make sure you have write access in /tmp dir. 
Not needed but will help you to - identify any problem with configuration. -''' + - If not specified on config file, a temporary error log is created on C(/tmp) directory. Please make sure you have write + access in C(/tmp) directory. Not needed but it is helpful to identify any problem with configuration. +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Simple gunicorn run example community.general.gunicorn: app: 'wsgi' @@ -89,15 +90,15 @@ EXAMPLES = ''' venv: '/workspace/example/venv' pid: '/workspace/example/gunicorn.pid' user: 'ansible' -''' +""" -RETURN = ''' +RETURN = r""" gunicorn: - description: process id of gunicorn - returned: changed - type: str - sample: "1234" -''' + description: Process ID of gunicorn. + returned: changed + type: str + sample: "1234" +""" import os import time diff --git a/plugins/modules/hana_query.py b/plugins/modules/hana_query.py deleted file mode 100644 index 746b2a3f44..0000000000 --- a/plugins/modules/hana_query.py +++ /dev/null @@ -1,214 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright (c) 2021, Rainer Leber -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' ---- -module: hana_query -short_description: Execute SQL on HANA -version_added: 3.2.0 -description: This module executes SQL statements on HANA with hdbsql. -options: - sid: - description: The system ID. - type: str - required: true - instance: - description: The instance number. - type: str - required: true - user: - description: A dedicated username. The user could be also in hdbuserstore. Defaults to C(SYSTEM). - type: str - default: SYSTEM - userstore: - description: If C(true) the user must be in hdbuserstore. 
- type: bool - default: false - version_added: 3.5.0 - password: - description: - - The password to connect to the database. - - "B(Note:) Since the passwords have to be passed as command line arguments, I(userstore=true) should - be used whenever possible, as command line arguments can be seen by other users - on the same machine." - type: str - autocommit: - description: Autocommit the statement. - type: bool - default: true - host: - description: The Host IP address. The port can be defined as well. - type: str - database: - description: Define the database on which to connect. - type: str - encrypted: - description: Use encrypted connection. Defaults to C(false). - type: bool - default: false - filepath: - description: - - One or more files each containing one SQL query to run. - - Must be a string or list containing strings. - type: list - elements: path - query: - description: - - SQL query to run. - - Must be a string or list containing strings. Please note that if you supply a string, it will be split by commas (C(,)) to a list. - It is better to supply a one-element list instead to avoid mangled input. - type: list - elements: str -notes: - - Does not support C(check_mode). 
-author: - - Rainer Leber (@rainerleber) -''' - -EXAMPLES = r''' -- name: Simple select query - community.general.hana_query: - sid: "hdb" - instance: "01" - password: "Test123" - query: "select user_name from users" - -- name: Run several queries - community.general.hana_query: - sid: "hdb" - instance: "01" - password: "Test123" - query: - - "select user_name from users;" - - select * from SYSTEM; - host: "localhost" - autocommit: false - -- name: Run several queries from file - community.general.hana_query: - sid: "hdb" - instance: "01" - password: "Test123" - filepath: - - /tmp/HANA_CPU_UtilizationPerCore_2.00.020+.txt - - /tmp/HANA.txt - host: "localhost" - -- name: Run several queries from user store - community.general.hana_query: - sid: "hdb" - instance: "01" - user: hdbstoreuser - userstore: true - query: - - "select user_name from users;" - - select * from users; - autocommit: false -''' - -RETURN = r''' -query_result: - description: List containing results of all queries executed (one sublist for every query). 
- returned: on success - type: list - elements: list - sample: [[{"Column": "Value1"}, {"Column": "Value2"}], [{"Column": "Value1"}, {"Column": "Value2"}]] -''' - -import csv -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import StringIO -from ansible.module_utils.common.text.converters import to_native - - -def csv_to_list(rawcsv): - reader_raw = csv.DictReader(StringIO(rawcsv)) - reader = [dict((k, v.strip()) for k, v in row.items()) for row in reader_raw] - return list(reader) - - -def main(): - module = AnsibleModule( - argument_spec=dict( - sid=dict(type='str', required=True), - instance=dict(type='str', required=True), - encrypted=dict(type='bool', default=False), - host=dict(type='str', required=False), - user=dict(type='str', default="SYSTEM"), - userstore=dict(type='bool', default=False), - password=dict(type='str', no_log=True), - database=dict(type='str', required=False), - query=dict(type='list', elements='str', required=False), - filepath=dict(type='list', elements='path', required=False), - autocommit=dict(type='bool', default=True), - ), - required_one_of=[('query', 'filepath')], - required_if=[('userstore', False, ['password'])], - supports_check_mode=False, - ) - rc, out, err, out_raw = [0, [], "", ""] - - params = module.params - - sid = (params['sid']).upper() - instance = params['instance'] - user = params['user'] - userstore = params['userstore'] - password = params['password'] - autocommit = params['autocommit'] - host = params['host'] - database = params['database'] - encrypted = params['encrypted'] - - filepath = params['filepath'] - query = params['query'] - - bin_path = "/usr/sap/{sid}/HDB{instance}/exe/hdbsql".format(sid=sid, instance=instance) - - try: - command = [module.get_bin_path(bin_path, required=True)] - except Exception as e: - module.fail_json(msg='Failed to find hdbsql at the expected path "{0}". 
Please check SID and instance number: "{1}"'.format(bin_path, to_native(e))) - - if encrypted is True: - command.extend(['-attemptencrypt']) - if autocommit is False: - command.extend(['-z']) - if host is not None: - command.extend(['-n', host]) - if database is not None: - command.extend(['-d', database]) - # -x Suppresses additional output, such as the number of selected rows in a result set. - if userstore: - command.extend(['-x', '-U', user]) - else: - command.extend(['-x', '-i', instance, '-u', user, '-p', password]) - - if filepath is not None: - command.extend(['-I']) - for p in filepath: - # makes a command like hdbsql -i 01 -u SYSTEM -p secret123# -I /tmp/HANA_CPU_UtilizationPerCore_2.00.020+.txt, - # iterates through files and append the output to var out. - query_command = command + [p] - (rc, out_raw, err) = module.run_command(query_command) - out.append(csv_to_list(out_raw)) - if query is not None: - for q in query: - # makes a command like hdbsql -i 01 -u SYSTEM -p secret123# "select user_name from users", - # iterates through multiple commands and append the output to var out. 
- query_command = command + [q] - (rc, out_raw, err) = module.run_command(query_command) - out.append(csv_to_list(out_raw)) - changed = True - - module.exit_json(changed=changed, rc=rc, query_result=out, stderr=err) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/haproxy.py b/plugins/modules/haproxy.py index 38033bc81a..5fd927ba4e 100644 --- a/plugins/modules/haproxy.py +++ b/plugins/modules/haproxy.py @@ -1,38 +1,41 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2014, Ravi Bhure # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: haproxy short_description: Enable, disable, and set weights for HAProxy backend servers using socket commands author: -- Ravi Bhure (@ravibhure) + - Ravi Bhure (@ravibhure) description: - - Enable, disable, drain and set weights for HAProxy backend servers using socket commands. + - Enable, disable, drain and set weights for HAProxy backend servers using socket commands. notes: - - Enable, disable and drain commands are restricted and can only be issued on - sockets configured for level 'admin'. For example, you can add the line - 'stats socket /var/run/haproxy.sock level admin' to the general section of - haproxy.cfg. See U(http://haproxy.1wt.eu/download/1.5/doc/configuration.txt). - - Depends on netcat (C(nc)) being available; you need to install the appropriate - package for your operating system before this module can be used. + - Enable, disable and drain commands are restricted and can only be issued on sockets configured for level C(admin). For + example, you can add the line C(stats socket /var/run/haproxy.sock level admin) to the general section of C(haproxy.cfg). 
+ See U(http://haproxy.1wt.eu/download/1.5/doc/configuration.txt). + - Depends on netcat (C(nc)) being available; you need to install the appropriate package for your operating system before + this module can be used. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: backend: description: - Name of the HAProxy backend pool. - - If this parameter is unset, it will be auto-detected. + - If this parameter is unset, it is auto-detected. type: str drain: description: - - Wait until the server has no active connections or until the timeout - determined by wait_interval and wait_retries is reached. + - Wait until the server has no active connections or until the timeout determined by O(wait_interval) and O(wait_retries) + is reached. - Continue only after the status changes to C(MAINT). - This overrides the shutdown_sessions option. type: bool @@ -44,10 +47,9 @@ options: required: true shutdown_sessions: description: - - When disabling a server, immediately terminate all the sessions attached - to the specified server. - - This can be used to terminate long-running sessions after a server is put - into maintenance mode. Overridden by the drain option. + - When disabling a server, immediately terminate all the sessions attached to the specified server. + - This can be used to terminate long-running sessions after a server is put into maintenance mode. Overridden by the + drain option. type: bool default: false socket: @@ -58,21 +60,19 @@ options: state: description: - Desired state of the provided backend host. - - Note that C(drain) state was added in version 2.4. - - It is supported only by HAProxy version 1.5 or later, - - When used on versions < 1.5, it will be ignored. + - Note that V(drain) state is supported only by HAProxy version 1.5 or later. When used on versions < 1.5, it is ignored. 
type: str required: true - choices: [ disabled, drain, enabled ] + choices: [disabled, drain, enabled] agent: description: - - Disable/enable agent checks (depending on I(state) value). + - Disable/enable agent checks (depending on O(state) value). type: bool default: false version_added: 1.0.0 health: description: - - Disable/enable health checks (depending on I(state) value). + - Disable/enable health checks (depending on O(state) value). type: bool default: false version_added: "1.0.0" @@ -83,8 +83,8 @@ options: default: false wait: description: - - Wait until the server reports a status of C(UP) when I(state=enabled), - status of C(MAINT) when I(state=disabled) or status of C(DRAIN) when I(state=drain). + - Wait until the server reports a status of C(UP) when O(state=enabled), status of C(MAINT) when O(state=disabled) or + status of C(DRAIN) when O(state=drain). type: bool default: false wait_interval: @@ -100,14 +100,12 @@ options: weight: description: - The value passed in argument. - - If the value ends with the C(%) sign, then the new weight will be - relative to the initially configured weight. - - Relative weights are only permitted between 0 and 100% and absolute - weights are permitted between 0 and 256. + - If the value ends with the V(%) sign, then the new weight is relative to the initially configured weight. + - Relative weights are only permitted between 0 and 100% and absolute weights are permitted between 0 and 256. 
type: str -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Disable server in 'www' backend pool community.general.haproxy: state: disabled @@ -162,7 +160,8 @@ EXAMPLES = r''' socket: /var/run/haproxy.sock shutdown_sessions: true -- name: Disable server without backend pool name (apply to all available backend pool) but fail when the backend host is not found +- name: Disable server without backend pool name (apply to all available backend pool) but fail when the backend host is + not found community.general.haproxy: state: disabled host: '{{ inventory_hostname }}' @@ -181,7 +180,8 @@ EXAMPLES = r''' backend: www wait: true -- name: Enable server in 'www' backend pool wait until healthy. Retry 10 times with intervals of 5 seconds to retrieve the health +- name: Enable server in 'www' backend pool wait until healthy. Retry 10 times with intervals of 5 seconds to retrieve the + health community.general.haproxy: state: enabled host: '{{ inventory_hostname }}' @@ -204,7 +204,7 @@ EXAMPLES = r''' host: '{{ inventory_hostname }}' socket: /var/run/haproxy.sock backend: www -''' +""" import csv import socket @@ -297,7 +297,7 @@ class HAProxy(object): """ data = self.execute('show stat', 200, False).lstrip('# ') r = csv.DictReader(data.splitlines()) - return tuple(map(lambda d: d['pxname'], filter(lambda d: d['svname'] == 'BACKEND', r))) + return tuple(d['pxname'] for d in r if d['svname'] == 'BACKEND') def discover_version(self): """ @@ -336,7 +336,7 @@ class HAProxy(object): if state is not None: self.execute(Template(cmd).substitute(pxname=backend, svname=svname)) - if self.wait: + if self.wait and not (wait_for_status == "DRAIN" and state == "DOWN"): self.wait_until_status(backend, svname, wait_for_status) def get_state_for(self, pxname, svname): @@ -346,13 +346,11 @@ class HAProxy(object): """ data = self.execute('show stat', 200, False).lstrip('# ') r = csv.DictReader(data.splitlines()) - state = tuple( - map( - lambda d: {'status': d['status'], 'weight': 
d['weight'], 'scur': d['scur']}, - filter(lambda d: (pxname is None or d['pxname'] - == pxname) and d['svname'] == svname, r) - ) - ) + + def unpack_state(d): + return {'status': d['status'], 'weight': d['weight'], 'scur': d['scur']} + + state = tuple(unpack_state(d) for d in r if (pxname is None or d['pxname'] == pxname) and d['svname'] == svname) return state or None def wait_until_status(self, pxname, svname, status): diff --git a/plugins/modules/heroku_collaborator.py b/plugins/modules/heroku_collaborator.py index d76b2b6507..e8094760a6 100644 --- a/plugins/modules/heroku_collaborator.py +++ b/plugins/modules/heroku_collaborator.py @@ -1,59 +1,63 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2018, Ansible Project # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: heroku_collaborator short_description: Add or delete app collaborators on Heroku description: - Manages collaborators for Heroku apps. - - If set to C(present) and heroku user is already collaborator, then do nothing. - - If set to C(present) and heroku user is not collaborator, then add user to app. - - If set to C(absent) and heroku user is collaborator, then delete user from app. + - If set to V(present) and heroku user is already collaborator, then do nothing. + - If set to V(present) and heroku user is not collaborator, then add user to app. + - If set to V(absent) and heroku user is collaborator, then delete user from app. author: - Marcel Arns (@marns93) requirements: - heroku3 +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: api_key: type: str description: - - Heroku API key + - Heroku API key. 
apps: type: list elements: str description: - - List of Heroku App names + - List of Heroku App names. required: true suppress_invitation: description: - - Suppress email invitation when creating collaborator + - Suppress email invitation when creating collaborator. type: bool default: false user: type: str description: - - User ID or e-mail + - User ID or e-mail. required: true state: type: str description: - - Create or remove the heroku collaborator + - Create or remove the heroku collaborator. choices: ["present", "absent"] default: "present" notes: - - C(HEROKU_API_KEY) and C(TF_VAR_HEROKU_API_KEY) env variable can be used instead setting C(api_key). - - If you use I(--check), you can also pass the I(-v) flag to see affected apps in C(msg), e.g. ["heroku-example-app"]. -''' + - E(HEROKU_API_KEY) and E(TF_VAR_HEROKU_API_KEY) environment variables can be used instead setting O(api_key). + - If you use C(check_mode), you can also pass the C(-v) flag to see affected apps in C(msg), for example C(["heroku-example-app"]). 
+""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a heroku collaborator community.general.heroku_collaborator: api_key: YOUR_API_KEY @@ -69,12 +73,12 @@ EXAMPLES = ''' suppress_invitation: '{{ item.suppress_invitation | default(suppress_invitation) }}' state: '{{ item.state | default("present") }}' with_items: - - { user: 'a.b@example.com' } - - { state: 'absent', user: 'b.c@example.com', suppress_invitation: false } - - { user: 'x.y@example.com', apps: ["heroku-example-app"] } -''' + - {user: 'a.b@example.com'} + - {state: 'absent', user: 'b.c@example.com', suppress_invitation: false} + - {user: 'x.y@example.com', apps: ["heroku-example-app"]} +""" -RETURN = ''' # ''' +RETURN = """ # """ from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.heroku import HerokuHelper diff --git a/plugins/modules/hg.py b/plugins/modules/hg.py index 777feaa059..afd3e59dd3 100644 --- a/plugins/modules/hg.py +++ b/plugins/modules/hg.py @@ -1,77 +1,77 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2013, Yeukhon Wong # Copyright (c) 2014, Nate Coraor # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hg short_description: Manages Mercurial (hg) repositories description: - - Manages Mercurial (hg) repositories. Supports SSH, HTTP/S and local address. + - Manages Mercurial (hg) repositories. Supports SSH, HTTP/S and local address. author: "Yeukhon Wong (@yeukhon)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - repo: - description: - - The repository address. 
- required: true - aliases: [ name ] - type: str - dest: - description: - - Absolute path of where the repository should be cloned to. - This parameter is required, unless clone and update are set to no - type: path - revision: - description: - - Equivalent C(-r) option in hg command which could be the changeset, revision number, - branch name or even tag. - aliases: [ version ] - type: str - force: - description: - - Discards uncommitted changes. Runs C(hg update -C). Prior to - 1.9, the default was C(true). - type: bool - default: false - purge: - description: - - Deletes untracked files. Runs C(hg purge). - type: bool - default: false - update: - description: - - If C(false), do not retrieve new revisions from the origin repository - type: bool - default: true - clone: - description: - - If C(false), do not clone the repository if it does not exist locally. - type: bool - default: true - executable: - description: - - Path to hg executable to use. If not supplied, - the normal mechanism for resolving binary paths will be used. - type: str + repo: + description: + - The repository address. + required: true + aliases: [name] + type: str + dest: + description: + - Absolute path of where the repository should be cloned to. This parameter is required, unless clone and update are + set to no. + type: path + revision: + description: + - Equivalent C(-r) option in hg command which could be the changeset, revision number, branch name or even tag. + aliases: [version] + type: str + force: + description: + - Discards uncommitted changes. Runs C(hg update -C). + type: bool + default: false + purge: + description: + - Deletes untracked files. Runs C(hg purge). + type: bool + default: false + update: + description: + - If V(false), do not retrieve new revisions from the origin repository. + type: bool + default: true + clone: + description: + - If V(false), do not clone the repository if it does not exist locally. 
+ type: bool + default: true + executable: + description: + - Path to C(hg) executable to use. If not supplied, the normal mechanism for resolving binary paths is used. + type: str notes: - - This module does not support push capability. See U(https://github.com/ansible/ansible/issues/31156). - - "If the task seems to be hanging, first verify remote host is in C(known_hosts). - SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt, - one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling - the hg module, with the following command: ssh-keyscan remote_host.com >> /etc/ssh/ssh_known_hosts." - - As per 01 Dec 2018, Bitbucket has dropped support for TLSv1 and TLSv1.1 connections. As such, - if the underlying system still uses a Python version below 2.7.9, you will have issues checking out - bitbucket repositories. See U(https://bitbucket.org/blog/deprecating-tlsv1-tlsv1-1-2018-12-01). -''' + - This module does not support push capability. See U(https://github.com/ansible/ansible/issues/31156). + - 'If the task seems to be hanging, first verify remote host is in C(known_hosts). SSH prompts user to authorize the first + contact with a remote host. To avoid this prompt, one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts) + before calling the hg module, with the following command: C(ssh-keyscan remote_host.com >> /etc/ssh/ssh_known_hosts).' + - As per 01 Dec 2018, Bitbucket has dropped support for TLSv1 and TLSv1.1 connections. As such, if the underlying system + still uses a Python version below 2.7.9, you are bound to have issues checking out bitbucket repositories. See + U(https://bitbucket.org/blog/deprecating-tlsv1-tlsv1-1-2018-12-01). +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Ensure the current working copy is inside the stable branch and deletes untracked files if any. 
community.general.hg: repo: https://bitbucket.org/user/repo1 @@ -85,7 +85,7 @@ EXAMPLES = ''' dest: /srv/checkout clone: false update: false -''' +""" import os @@ -203,7 +203,7 @@ class Hg(object): if the desired changeset is already the current changeset. """ if self.revision is None or len(self.revision) < 7: - # Assume it's a rev number, tag, or branch + # Assume it is a rev number, tag, or branch return False (rc, out, err) = self._command(['--debug', 'id', '-i', '-R', self.dest]) if rc != 0: @@ -220,12 +220,12 @@ def main(): argument_spec=dict( repo=dict(type='str', required=True, aliases=['name']), dest=dict(type='path'), - revision=dict(type='str', default=None, aliases=['version']), + revision=dict(type='str', aliases=['version']), force=dict(type='bool', default=False), purge=dict(type='bool', default=False), update=dict(type='bool', default=True), clone=dict(type='bool', default=True), - executable=dict(type='str', default=None), + executable=dict(type='str'), ), ) repo = module.params['repo'] diff --git a/plugins/modules/hipchat.py b/plugins/modules/hipchat.py deleted file mode 100644 index a5aa150f32..0000000000 --- a/plugins/modules/hipchat.py +++ /dev/null @@ -1,213 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -# -# Copyright Ansible Project -# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) -# SPDX-License-Identifier: GPL-3.0-or-later - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' ---- -module: hipchat -short_description: Send a message to Hipchat -description: - - Send a message to a Hipchat room, with options to control the formatting. -options: - token: - type: str - description: - - API token. - required: true - room: - type: str - description: - - ID or name of the room. - required: true - msg_from: - type: str - description: - - Name the message will appear to be sent from. 
Max length is 15 - characters - above this it will be truncated. - default: Ansible - aliases: [from] - msg: - type: str - description: - - The message body. - required: true - color: - type: str - description: - - Background color for the message. - default: yellow - choices: [ "yellow", "red", "green", "purple", "gray", "random" ] - msg_format: - type: str - description: - - Message format. - default: text - choices: [ "text", "html" ] - notify: - description: - - If true, a notification will be triggered for users in the room. - type: bool - default: true - validate_certs: - description: - - If C(false), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. - type: bool - default: true - api: - type: str - description: - - API url if using a self-hosted hipchat server. For Hipchat API version - 2 use the default URI with C(/v2) instead of C(/v1). - default: 'https://api.hipchat.com/v1' - -author: -- Shirou Wakayama (@shirou) -- Paul Bourdel (@pb8226) -''' - -EXAMPLES = ''' -- name: Send a message to a Hipchat room - community.general.hipchat: - room: notif - msg: Ansible task finished - -- name: Send a message to a Hipchat room using Hipchat API version 2 - community.general.hipchat: - api: https://api.hipchat.com/v2/ - token: OAUTH2_TOKEN - room: notify - msg: Ansible task finished -''' - -# =========================================== -# HipChat module specific support methods. 
-# - -import json -import traceback - -from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.six.moves.urllib.request import pathname2url -from ansible.module_utils.common.text.converters import to_native -from ansible.module_utils.urls import fetch_url - - -DEFAULT_URI = "https://api.hipchat.com/v1" - -MSG_URI_V1 = "/rooms/message" - -NOTIFY_URI_V2 = "/room/{id_or_name}/notification" - - -def send_msg_v1(module, token, room, msg_from, msg, msg_format='text', - color='yellow', notify=False, api=MSG_URI_V1): - '''sending message to hipchat v1 server''' - - params = {} - params['room_id'] = room - params['from'] = msg_from[:15] # max length is 15 - params['message'] = msg - params['message_format'] = msg_format - params['color'] = color - params['api'] = api - params['notify'] = int(notify) - - url = api + MSG_URI_V1 + "?auth_token=%s" % (token) - data = urlencode(params) - - if module.check_mode: - # In check mode, exit before actually sending the message - module.exit_json(changed=False) - - response, info = fetch_url(module, url, data=data) - if info['status'] == 200: - return response.read() - else: - module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) - - -def send_msg_v2(module, token, room, msg_from, msg, msg_format='text', - color='yellow', notify=False, api=NOTIFY_URI_V2): - '''sending message to hipchat v2 server''' - - headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'} - - body = dict() - body['message'] = msg - body['color'] = color - body['message_format'] = msg_format - body['notify'] = notify - - POST_URL = api + NOTIFY_URI_V2 - - url = POST_URL.replace('{id_or_name}', pathname2url(room)) - data = json.dumps(body) - - if module.check_mode: - # In check mode, exit before actually sending the message - module.exit_json(changed=False) - - response, info = fetch_url(module, url, data=data, 
headers=headers, method='POST') - - # https://www.hipchat.com/docs/apiv2/method/send_room_notification shows - # 204 to be the expected result code. - if info['status'] in [200, 204]: - return response.read() - else: - module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) - - -# =========================================== -# Module execution. -# - -def main(): - - module = AnsibleModule( - argument_spec=dict( - token=dict(required=True, no_log=True), - room=dict(required=True), - msg=dict(required=True), - msg_from=dict(default="Ansible", aliases=['from']), - color=dict(default="yellow", choices=["yellow", "red", "green", - "purple", "gray", "random"]), - msg_format=dict(default="text", choices=["text", "html"]), - notify=dict(default=True, type='bool'), - validate_certs=dict(default=True, type='bool'), - api=dict(default=DEFAULT_URI), - ), - supports_check_mode=True - ) - - token = module.params["token"] - room = str(module.params["room"]) - msg = module.params["msg"] - msg_from = module.params["msg_from"] - color = module.params["color"] - msg_format = module.params["msg_format"] - notify = module.params["notify"] - api = module.params["api"] - - try: - if api.find('/v2') != -1: - send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api) - else: - send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api) - except Exception as e: - module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc()) - - changed = True - module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg) - - -if __name__ == '__main__': - main() diff --git a/plugins/modules/homebrew.py b/plugins/modules/homebrew.py index a72d929a81..2b0e4408a2 100644 --- a/plugins/modules/homebrew.py +++ b/plugins/modules/homebrew.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2013, Andrew Dunham # Copyright (c) 2013, Daniel Jaouen @@ -10,78 +9,89 @@ # GNU 
General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: homebrew author: - - "Indrajit Raychaudhuri (@indrajitr)" - - "Daniel Jaouen (@danieljaouen)" - - "Andrew Dunham (@andrew-d)" + - "Indrajit Raychaudhuri (@indrajitr)" + - "Daniel Jaouen (@danieljaouen)" + - "Andrew Dunham (@andrew-d)" requirements: - - "python >= 2.6" - - homebrew must already be installed on the target system + - homebrew must already be installed on the target system short_description: Package manager for Homebrew description: - - Manages Homebrew packages + - Manages Homebrew packages. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - A list of names of packages to install/remove. - aliases: [ 'formula', 'package', 'pkg' ] - type: list - elements: str - path: - description: - - "A C(:) separated list of paths to search for C(brew) executable. - Since a package (I(formula) in homebrew parlance) location is prefixed relative to the actual path of I(brew) command, - providing an alternative I(brew) path enables managing different set of packages in an alternative location in the system." - default: '/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin' - type: path - state: - description: - - state of the package. - choices: [ 'absent', 'head', 'installed', 'latest', 'linked', 'present', 'removed', 'uninstalled', 'unlinked', 'upgraded' ] - default: present - type: str - update_homebrew: - description: - - update homebrew itself first. - type: bool - default: false - upgrade_all: - description: - - upgrade all homebrew packages. 
- type: bool - default: false - aliases: ['upgrade'] - install_options: - description: - - options flags to install a package. - aliases: ['options'] - type: list - elements: str - upgrade_options: - description: - - Option flags to upgrade. - type: list - elements: str - version_added: '0.2.0' + name: + description: + - A list of names of packages to install/remove. + aliases: ['formula', 'package', 'pkg'] + type: list + elements: str + path: + description: + - A V(:) separated list of paths to search for C(brew) executable. Since a package (I(formula) in homebrew parlance) + location is prefixed relative to the actual path of C(brew) command, providing an alternative C(brew) path enables + managing different set of packages in an alternative location in the system. + default: '/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin' + type: path + state: + description: + - State of the package. + choices: ['absent', 'head', 'installed', 'latest', 'linked', 'present', 'removed', 'uninstalled', 'unlinked', 'upgraded'] + default: present + type: str + update_homebrew: + description: + - Update homebrew itself first. + type: bool + default: false + upgrade_all: + description: + - Upgrade all homebrew packages. + type: bool + default: false + aliases: ['upgrade'] + install_options: + description: + - Options flags to install a package. + aliases: ['options'] + type: list + elements: str + upgrade_options: + description: + - Option flags to upgrade. + type: list + elements: str + version_added: '0.2.0' + force_formula: + description: + - Force the package(s) to be treated as a formula (equivalent to C(brew --formula)). + - To install a cask, use the M(community.general.homebrew_cask) module. + type: bool + default: false + version_added: 9.0.0 notes: - - When used with a C(loop:) each package will be processed individually, - it is much more efficient to pass the list directly to the I(name) option. 
-''' + - When used with a C(loop:) each package is processed individually, it is much more efficient to pass the list directly + to the O(name) option. +""" -EXAMPLES = ''' +EXAMPLES = r""" # Install formula foo with 'brew' in default path - community.general.homebrew: name: foo state: present -# Install formula foo with 'brew' in alternate path C(/my/other/location/bin) +# Install formula foo with 'brew' in alternate path (/my/other/location/bin) - community.general.homebrew: name: foo path: /my/other/location/bin @@ -135,35 +145,42 @@ EXAMPLES = ''' community.general.homebrew: upgrade_all: true upgrade_options: ignore-pinned -''' -RETURN = ''' +- name: Force installing a formula whose name is also a cask name + community.general.homebrew: + name: ambiguous_formula + state: present + force_formula: true +""" + +RETURN = r""" msg: - description: if the cache was updated or not - returned: always - type: str - sample: "Changed: 0, Unchanged: 2" + description: If the cache was updated or not. + returned: always + type: str + sample: "Changed: 0, Unchanged: 2" unchanged_pkgs: - description: - - List of package names which are unchanged after module run - returned: success - type: list - sample: ["awscli", "ag"] - version_added: '0.2.0' + description: + - List of package names which are unchanged after module run. + returned: success + type: list + sample: ["awscli", "ag"] + version_added: '0.2.0' changed_pkgs: - description: - - List of package names which are changed after module run - returned: success - type: list - sample: ['git', 'git-cola'] - version_added: '0.2.0' -''' + description: + - List of package names which are changed after module run. 
+ returned: success + type: list + sample: ["git", "git-cola"] + version_added: '0.2.0' +""" -import os.path +import json import re +from ansible_collections.community.general.plugins.module_utils.homebrew import HomebrewValidate + from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import iteritems, string_types # exceptions -------------------------------------------------------------- {{{ @@ -175,107 +192,20 @@ class HomebrewException(Exception): # utils ------------------------------------------------------------------- {{{ def _create_regex_group_complement(s): lines = (line.strip() for line in s.split('\n') if line.strip()) - chars = filter(None, (line.split('#')[0].strip() for line in lines)) + chars = [_f for _f in (line.split('#')[0].strip() for line in lines) if _f] group = r'[^' + r''.join(chars) + r']' return re.compile(group) + + +def _check_package_in_json(json_output, package_type): + return bool(json_output.get(package_type, []) and json_output[package_type][0].get("installed")) # /utils ------------------------------------------------------------------ }}} class Homebrew(object): '''A class to manage Homebrew packages.''' - # class regexes ------------------------------------------------ {{{ - VALID_PATH_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - \s # spaces - : # colons - {sep} # the OS-specific path separator - . # dots - \- # dashes - '''.format(sep=os.path.sep) - - VALID_BREW_PATH_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - \s # spaces - {sep} # the OS-specific path separator - . # dots - \- # dashes - '''.format(sep=os.path.sep) - - VALID_PACKAGE_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - . 
# dots - / # slash (for taps) - \+ # plusses - \- # dashes - : # colons (for URLs) - @ # at-sign - ''' - - INVALID_PATH_REGEX = _create_regex_group_complement(VALID_PATH_CHARS) - INVALID_BREW_PATH_REGEX = _create_regex_group_complement(VALID_BREW_PATH_CHARS) - INVALID_PACKAGE_REGEX = _create_regex_group_complement(VALID_PACKAGE_CHARS) - # /class regexes ----------------------------------------------- }}} - # class validations -------------------------------------------- {{{ - @classmethod - def valid_path(cls, path): - ''' - `path` must be one of: - - list of paths - - a string containing only: - - alphanumeric characters - - dashes - - dots - - spaces - - colons - - os.path.sep - ''' - - if isinstance(path, string_types): - return not cls.INVALID_PATH_REGEX.search(path) - - try: - iter(path) - except TypeError: - return False - else: - paths = path - return all(cls.valid_brew_path(path_) for path_ in paths) - - @classmethod - def valid_brew_path(cls, brew_path): - ''' - `brew_path` must be one of: - - None - - a string containing only: - - alphanumeric characters - - dashes - - dots - - spaces - - os.path.sep - ''' - - if brew_path is None: - return True - - return ( - isinstance(brew_path, string_types) - and not cls.INVALID_BREW_PATH_REGEX.search(brew_path) - ) - - @classmethod - def valid_package(cls, package): - '''A valid package is either None or alphanumeric.''' - - if package is None: - return True - - return ( - isinstance(package, string_types) - and not cls.INVALID_PACKAGE_REGEX.search(package) - ) - @classmethod def valid_state(cls, state): ''' @@ -293,7 +223,7 @@ class Homebrew(object): return True else: return ( - isinstance(state, string_types) + isinstance(state, str) and state.lower() in ( 'installed', 'upgraded', @@ -335,14 +265,14 @@ class Homebrew(object): @path.setter def path(self, path): - if not self.valid_path(path): + if not HomebrewValidate.valid_path(path): self._path = [] self.failed = True self.message = 'Invalid path: 
{0}.'.format(path) raise HomebrewException(self.message) else: - if isinstance(path, string_types): + if isinstance(path, str): self._path = path.split(':') else: self._path = path @@ -355,7 +285,7 @@ class Homebrew(object): @brew_path.setter def brew_path(self, brew_path): - if not self.valid_brew_path(brew_path): + if not HomebrewValidate.valid_brew_path(brew_path): self._brew_path = None self.failed = True self.message = 'Invalid brew_path: {0}.'.format(brew_path) @@ -374,26 +304,12 @@ class Homebrew(object): self._params = self.module.params return self._params - @property - def current_package(self): - return self._current_package - - @current_package.setter - def current_package(self, package): - if not self.valid_package(package): - self._current_package = None - self.failed = True - self.message = 'Invalid package: {0}.'.format(package) - raise HomebrewException(self.message) - - else: - self._current_package = package - return package # /class properties -------------------------------------------- }}} def __init__(self, module, path, packages=None, state=None, update_homebrew=False, upgrade_all=False, - install_options=None, upgrade_options=None): + install_options=None, upgrade_options=None, + force_formula=False): if not install_options: install_options = list() if not upgrade_options: @@ -403,7 +319,8 @@ class Homebrew(object): state=state, update_homebrew=update_homebrew, upgrade_all=upgrade_all, install_options=install_options, - upgrade_options=upgrade_options,) + upgrade_options=upgrade_options, + force_formula=force_formula) self._prep() @@ -411,14 +328,14 @@ class Homebrew(object): def _setup_status_vars(self): self.failed = False self.changed = False - self.changed_count = 0 - self.unchanged_count = 0 self.changed_pkgs = [] self.unchanged_pkgs = [] self.message = '' def _setup_instance_vars(self, **kwargs): - for key, val in iteritems(kwargs): + self.installed_packages = set() + self.outdated_packages = set() + for key, val in kwargs.items(): 
setattr(self, key, val) def _prep(self): @@ -444,8 +361,98 @@ class Homebrew(object): return self.brew_path - def _status(self): - return (self.failed, self.changed, self.message) + def _validate_packages_names(self): + invalid_packages = [] + for package in self.packages: + if not HomebrewValidate.valid_package(package): + invalid_packages.append(package) + + if invalid_packages: + self.failed = True + self.message = 'Invalid package{0}: {1}'.format( + "s" if len(invalid_packages) > 1 else "", + ", ".join(invalid_packages), + ) + raise HomebrewException(self.message) + + def _save_package_info(self, package_detail, package_name): + if bool(package_detail.get("installed")): + self.installed_packages.add(package_name) + if bool(package_detail.get("outdated")): + self.outdated_packages.add(package_name) + + def _extract_package_name(self, package_detail, is_cask): + # "brew info" can lookup by name, full_name, token, full_token, + # oldnames, old_tokens, or aliases. In addition, any of the + # above names can be prefixed by the tap. Any of these can be + # supplied by the user as the package name. In case of + # ambiguity, where a given name might match multiple packages, + # formulae are preferred over casks. For all other ambiguities, + # the results are an error. Note that in the homebrew/core and + # homebrew/cask taps, there are no "other" ambiguities. + if is_cask: # according to brew info + name = package_detail["token"] + full_name = package_detail["full_token"] + else: + name = package_detail["name"] + full_name = package_detail["full_name"] + + # Issue https://github.com/ansible-collections/community.general/issues/9803: + # name can include the tap as a prefix, in order to disambiguate, + # e.g. casks from identically named formulae. + # + # Issue https://github.com/ansible-collections/community.general/issues/10012: + # package_detail["tap"] is None if package is no longer available. 
+ # + # Issue https://github.com/ansible-collections/community.general/issues/10804 + # name can be an alias, oldnames or old_tokens optionally prefixed by tap + package_names = {name, full_name} + package_names.update(package_detail.get("aliases", [])) + package_names.update(package_detail.get("oldnames", [])) + package_names.update(package_detail.get("old_tokens", [])) + if package_detail['tap']: + # names so far, with tap prefix added to each + tapped_names = {package_detail["tap"] + "/" + x for x in package_names} + package_names.update(tapped_names) + + # Finally, identify which of all those package names was the one supplied by the user. + package_names = package_names & set(self.packages) + if len(package_names) != 1: + self.failed = True + self.message = "Package names for {name} are missing or ambiguous: {packages}".format( + name=name, + packages=", ".join(str(p) for p in package_names), + ) + raise HomebrewException(self.message) + + # Then make sure the user provided name resurface. 
+ return package_names.pop() + + def _get_packages_info(self): + cmd = [ + "{brew_path}".format(brew_path=self.brew_path), + "info", + "--json=v2", + ] + cmd.extend(self.packages) + if self.force_formula: + cmd.append("--formula") + + rc, out, err = self.module.run_command(cmd) + if rc != 0: + self.failed = True + self.message = err.strip() or ("Unknown failure with exit code %d" % rc) + raise HomebrewException(self.message) + + data = json.loads(out) + for package_detail in data.get("formulae", []): + package_name = self._extract_package_name(package_detail, is_cask=False) + self._save_package_info(package_detail, package_name) + + for package_detail in data.get("casks", []): + package_name = self._extract_package_name(package_detail, is_cask=True) + self._save_package_info(package_detail, package_name) + # /prep -------------------------------------------------------- }}} def run(self): @@ -454,68 +461,14 @@ class Homebrew(object): except HomebrewException: pass - if not self.failed and (self.changed_count + self.unchanged_count > 1): + changed_count = len(self.changed_pkgs) + unchanged_count = len(self.unchanged_pkgs) + if not self.failed and (changed_count + unchanged_count > 1): self.message = "Changed: %d, Unchanged: %d" % ( - self.changed_count, - self.unchanged_count, + changed_count, + unchanged_count, ) - (failed, changed, message) = self._status() - - return (failed, changed, message) - - # checks ------------------------------------------------------- {{{ - def _current_package_is_installed(self): - if not self.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - cmd = [ - "{brew_path}".format(brew_path=self.brew_path), - "info", - self.current_package, - ] - rc, out, err = self.module.run_command(cmd) - for line in out.split('\n'): - if ( - re.search(r'Built from source', line) - or re.search(r'Poured from bottle', line) - ): - return 
True - - return False - - def _current_package_is_outdated(self): - if not self.valid_package(self.current_package): - return False - - rc, out, err = self.module.run_command([ - self.brew_path, - 'outdated', - self.current_package, - ]) - - return rc != 0 - - def _current_package_is_installed_from_head(self): - if not Homebrew.valid_package(self.current_package): - return False - elif not self._current_package_is_installed(): - return False - - rc, out, err = self.module.run_command([ - self.brew_path, - 'info', - self.current_package, - ]) - - try: - version_info = [line for line in out.split('\n') if line][0] - except IndexError: - return False - - return version_info.split(' ')[-1] == 'HEAD' - # /checks ------------------------------------------------------ }}} + return (self.failed, self.changed, self.message) # commands ----------------------------------------------------- {{{ def _run(self): @@ -526,6 +479,8 @@ class Homebrew(object): self._upgrade_all() if self.packages: + self._validate_packages_names() + self._get_packages_info() if self.state == 'installed': return self._install_packages() elif self.state == 'upgraded': @@ -551,7 +506,7 @@ class Homebrew(object): 'update', ]) if rc == 0: - if out and isinstance(out, string_types): + if out and isinstance(out, str): already_updated = any( re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE) for s in out.split('\n') @@ -595,24 +550,22 @@ class Homebrew(object): # /_upgrade_all -------------------------- }}} # installed ------------------------------ {{{ - def _install_current_package(self): - if not self.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) + def _install_packages(self): + packages_to_install = set(self.packages) - self.installed_packages - if self._current_package_is_installed(): - self.unchanged_count += 1 - self.unchanged_pkgs.append(self.current_package) - 
self.message = 'Package already installed: {0}'.format( - self.current_package, + if len(packages_to_install) == 0: + self.unchanged_pkgs.extend(self.packages) + self.message = 'Package{0} already installed: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages), ) return True if self.module.check_mode: self.changed = True - self.message = 'Package would be installed: {0}'.format( - self.current_package + self.message = 'Package{0} would be installed: {1}'.format( + "s" if len(packages_to_install) > 1 else "", + ", ".join(packages_to_install) ) raise HomebrewException(self.message) @@ -621,79 +574,36 @@ class Homebrew(object): else: head = None + if self.force_formula: + formula = '--formula' + else: + formula = None + opts = ( [self.brew_path, 'install'] + self.install_options - + [self.current_package, head] + + list(packages_to_install) + + [head, formula] ) cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) - if self._current_package_is_installed(): - self.changed_count += 1 - self.changed_pkgs.append(self.current_package) + if rc == 0: + self.changed_pkgs.extend(packages_to_install) + self.unchanged_pkgs.extend(self.installed_packages) self.changed = True - self.message = 'Package installed: {0}'.format(self.current_package) + self.message = 'Package{0} installed: {1}'.format( + "s" if len(packages_to_install) > 1 else "", + ", ".join(packages_to_install) + ) return True else: self.failed = True self.message = err.strip() raise HomebrewException(self.message) - - def _install_packages(self): - for package in self.packages: - self.current_package = package - self._install_current_package() - - return True # /installed ----------------------------- }}} # upgraded ------------------------------- {{{ - def _upgrade_current_package(self): - command = 'upgrade' - - if not self.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise 
HomebrewException(self.message) - - if not self._current_package_is_installed(): - command = 'install' - - if self._current_package_is_installed() and not self._current_package_is_outdated(): - self.message = 'Package is already upgraded: {0}'.format( - self.current_package, - ) - self.unchanged_count += 1 - self.unchanged_pkgs.append(self.current_package) - return True - - if self.module.check_mode: - self.changed = True - self.message = 'Package would be upgraded: {0}'.format( - self.current_package - ) - raise HomebrewException(self.message) - - opts = ( - [self.brew_path, command] - + self.install_options - + [self.current_package] - ) - cmd = [opt for opt in opts if opt] - rc, out, err = self.module.run_command(cmd) - - if self._current_package_is_installed() and not self._current_package_is_outdated(): - self.changed_count += 1 - self.changed_pkgs.append(self.current_package) - self.changed = True - self.message = 'Package upgraded: {0}'.format(self.current_package) - return True - else: - self.failed = True - self.message = err.strip() - raise HomebrewException(self.message) - def _upgrade_all_packages(self): opts = ( [self.brew_path, 'upgrade'] @@ -715,153 +625,188 @@ class Homebrew(object): if not self.packages: self._upgrade_all_packages() else: - for package in self.packages: - self.current_package = package - self._upgrade_current_package() - return True + # There are 3 action possible here depending on installed and outdated states: + # - not installed -> 'install' + # - installed and outdated -> 'upgrade' + # - installed and NOT outdated -> Nothing to do! 
+ packages_to_install = set(self.packages) - self.installed_packages + packages_to_upgrade = self.installed_packages & self.outdated_packages + packages_to_install_or_upgrade = packages_to_install | packages_to_upgrade + + if len(packages_to_install_or_upgrade) == 0: + self.unchanged_pkgs.extend(self.packages) + self.message = 'Package{0} already upgraded: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages), + ) + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Package{0} would be upgraded: {1}'.format( + "s" if len(packages_to_install_or_upgrade) > 1 else "", + ", ".join(packages_to_install_or_upgrade) + ) + raise HomebrewException(self.message) + + for command, packages in [ + ("install", packages_to_install), + ("upgrade", packages_to_upgrade) + ]: + if not packages: + continue + + opts = ( + [self.brew_path, command] + + self.install_options + + list(packages) + ) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) + + if rc != 0: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + + self.changed_pkgs.extend(packages_to_install_or_upgrade) + self.unchanged_pkgs.extend(set(self.packages) - packages_to_install_or_upgrade) + self.changed = True + self.message = 'Package{0} upgraded: {1}'.format( + "s" if len(packages_to_install_or_upgrade) > 1 else "", + ", ".join(packages_to_install_or_upgrade), + ) # /upgraded ------------------------------ }}} # uninstalled ---------------------------- {{{ - def _uninstall_current_package(self): - if not self.valid_package(self.current_package): - self.failed = True - self.message = 'Invalid package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) + def _uninstall_packages(self): + packages_to_uninstall = self.installed_packages & set(self.packages) - if not self._current_package_is_installed(): - self.unchanged_count += 1 - 
self.unchanged_pkgs.append(self.current_package) - self.message = 'Package already uninstalled: {0}'.format( - self.current_package, + if len(packages_to_uninstall) == 0: + self.unchanged_pkgs.extend(self.packages) + self.message = 'Package{0} already uninstalled: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages), ) return True if self.module.check_mode: self.changed = True - self.message = 'Package would be uninstalled: {0}'.format( - self.current_package + self.message = 'Package{0} would be uninstalled: {1}'.format( + "s" if len(packages_to_uninstall) > 1 else "", + ", ".join(packages_to_uninstall) ) raise HomebrewException(self.message) opts = ( [self.brew_path, 'uninstall', '--force'] + self.install_options - + [self.current_package] + + list(packages_to_uninstall) ) cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) - if not self._current_package_is_installed(): - self.changed_count += 1 - self.changed_pkgs.append(self.current_package) + if rc == 0: + self.changed_pkgs.extend(packages_to_uninstall) + self.unchanged_pkgs.extend(set(self.packages) - self.installed_packages) self.changed = True - self.message = 'Package uninstalled: {0}'.format(self.current_package) + self.message = 'Package{0} uninstalled: {1}'.format( + "s" if len(packages_to_uninstall) > 1 else "", + ", ".join(packages_to_uninstall) + ) return True else: self.failed = True self.message = err.strip() raise HomebrewException(self.message) - - def _uninstall_packages(self): - for package in self.packages: - self.current_package = package - self._uninstall_current_package() - - return True # /uninstalled ----------------------------- }}} # linked --------------------------------- {{{ - def _link_current_package(self): - if not self.valid_package(self.current_package): + def _link_packages(self): + missing_packages = set(self.packages) - self.installed_packages + if missing_packages: self.failed = True - self.message = 'Invalid package: 
{0}.'.format(self.current_package) - raise HomebrewException(self.message) - - if not self._current_package_is_installed(): - self.failed = True - self.message = 'Package not installed: {0}.'.format(self.current_package) + self.message = 'Package{0} not installed: {1}.'.format( + "s" if len(missing_packages) > 1 else "", + ", ".join(missing_packages), + ) raise HomebrewException(self.message) if self.module.check_mode: self.changed = True - self.message = 'Package would be linked: {0}'.format( - self.current_package + self.message = 'Package{0} would be linked: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages) ) raise HomebrewException(self.message) opts = ( [self.brew_path, 'link'] + self.install_options - + [self.current_package] + + self.packages ) cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) if rc == 0: - self.changed_count += 1 - self.changed_pkgs.append(self.current_package) + self.changed_pkgs.extend(self.packages) self.changed = True - self.message = 'Package linked: {0}'.format(self.current_package) - + self.message = 'Package{0} linked: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages) + ) return True else: self.failed = True - self.message = 'Package could not be linked: {0}.'.format(self.current_package) + self.message = 'Package{0} could not be linked: {1}.'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages) + ) raise HomebrewException(self.message) - - def _link_packages(self): - for package in self.packages: - self.current_package = package - self._link_current_package() - - return True # /linked -------------------------------- }}} # unlinked ------------------------------- {{{ - def _unlink_current_package(self): - if not self.valid_package(self.current_package): + def _unlink_packages(self): + missing_packages = set(self.packages) - self.installed_packages + if missing_packages: self.failed = True - self.message = 'Invalid 
package: {0}.'.format(self.current_package) - raise HomebrewException(self.message) - - if not self._current_package_is_installed(): - self.failed = True - self.message = 'Package not installed: {0}.'.format(self.current_package) + self.message = 'Package{0} not installed: {1}.'.format( + "s" if len(missing_packages) > 1 else "", + ", ".join(missing_packages), + ) raise HomebrewException(self.message) if self.module.check_mode: self.changed = True - self.message = 'Package would be unlinked: {0}'.format( - self.current_package + self.message = 'Package{0} would be unlinked: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages) ) raise HomebrewException(self.message) opts = ( [self.brew_path, 'unlink'] + self.install_options - + [self.current_package] + + self.packages ) cmd = [opt for opt in opts if opt] rc, out, err = self.module.run_command(cmd) if rc == 0: - self.changed_count += 1 - self.changed_pkgs.append(self.current_package) + self.changed_pkgs.extend(self.packages) self.changed = True - self.message = 'Package unlinked: {0}'.format(self.current_package) - + self.message = 'Package{0} unlinked: {1}'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages) + ) return True else: self.failed = True - self.message = 'Package could not be unlinked: {0}.'.format(self.current_package) + self.message = 'Package{0} could not be unlinked: {1}.'.format( + "s" if len(self.packages) > 1 else "", + ", ".join(self.packages) + ) raise HomebrewException(self.message) - - def _unlink_packages(self): - for package in self.packages: - self.current_package = package - self._unlink_current_package() - - return True # /unlinked ------------------------------ }}} # /commands ---------------------------------------------------- }}} @@ -871,13 +816,11 @@ def main(): argument_spec=dict( name=dict( aliases=["pkg", "package", "formula"], - required=False, type='list', elements='str', ), path=dict( 
default="/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin", - required=False, type='path', ), state=dict( @@ -899,16 +842,18 @@ def main(): type='bool', ), install_options=dict( - default=None, aliases=['options'], type='list', elements='str', ), upgrade_options=dict( - default=None, type='list', elements='str', - ) + ), + force_formula=dict( + default=False, + type='bool', + ), ), supports_check_mode=True, ) @@ -918,7 +863,7 @@ def main(): p = module.params if p['name']: - packages = p['name'] + packages = [package_name.lower() for package_name in p['name']] else: packages = None @@ -940,6 +885,7 @@ def main(): if state in ('absent', 'removed', 'uninstalled'): state = 'absent' + force_formula = p['force_formula'] update_homebrew = p['update_homebrew'] if not update_homebrew: module.run_command_environ_update.update( @@ -956,7 +902,7 @@ def main(): brew = Homebrew(module=module, path=path, packages=packages, state=state, update_homebrew=update_homebrew, upgrade_all=upgrade_all, install_options=install_options, - upgrade_options=upgrade_options) + upgrade_options=upgrade_options, force_formula=force_formula) (failed, changed, message) = brew.run() changed_pkgs = brew.changed_pkgs unchanged_pkgs = brew.unchanged_pkgs diff --git a/plugins/modules/homebrew_cask.py b/plugins/modules/homebrew_cask.py index 5937831962..ac88e1bafe 100644 --- a/plugins/modules/homebrew_cask.py +++ b/plugins/modules/homebrew_cask.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2013, Daniel Jaouen # Copyright (c) 2016, Indrajit Raychaudhuri @@ -7,78 +6,81 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: homebrew_cask author: -- "Indrajit Raychaudhuri 
(@indrajitr)" -- "Daniel Jaouen (@danieljaouen)" -- "Enric Lluelles (@enriclluelles)" -requirements: -- "python >= 2.6" + - "Indrajit Raychaudhuri (@indrajitr)" + - "Daniel Jaouen (@danieljaouen)" + - "Enric Lluelles (@enriclluelles)" short_description: Install and uninstall homebrew casks description: -- Manages Homebrew casks. + - Manages Homebrew casks. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: name: description: - - Name of cask to install or remove. - aliases: [ 'cask', 'package', 'pkg' ] + - Name of cask to install or remove. + aliases: ['cask', 'package', 'pkg'] type: list elements: str path: description: - - "':' separated list of paths to search for 'brew' executable." + - "':' separated list of paths to search for 'brew' executable." default: '/usr/local/bin:/opt/homebrew/bin' type: path state: description: - - State of the cask. - choices: [ 'absent', 'installed', 'latest', 'present', 'removed', 'uninstalled', 'upgraded' ] + - State of the cask. + choices: ['absent', 'installed', 'latest', 'present', 'removed', 'uninstalled', 'upgraded'] default: present type: str sudo_password: description: - - The sudo password to be passed to SUDO_ASKPASS. + - The sudo password to be passed to E(SUDO_ASKPASS). required: false type: str update_homebrew: description: - - Update homebrew itself first. - - Note that C(brew cask update) is a synonym for C(brew update). + - Update homebrew itself first. + - Note that C(brew cask update) is a synonym for C(brew update). type: bool default: false install_options: description: - - Options flags to install a package. - aliases: [ 'options' ] + - Options flags to install a package. + aliases: ['options'] type: list elements: str accept_external_apps: description: - - Allow external apps. + - Allow external apps. type: bool default: false upgrade_all: description: - - Upgrade all casks. 
- - Mutually exclusive with C(upgraded) state. + - Upgrade all casks. + - Mutually exclusive with C(upgraded) state. type: bool default: false - aliases: [ 'upgrade' ] + aliases: ['upgrade'] greedy: description: - - Upgrade casks that auto update. - - Passes --greedy to brew cask outdated when checking - if an installed cask has a newer version available. + - Upgrade casks that auto update. + - Passes C(--greedy) to C(brew outdated --cask) when checking if an installed cask has a newer version available, or + to C(brew upgrade --cask) when upgrading all casks. type: bool default: false -''' -EXAMPLES = ''' +""" +EXAMPLES = r""" - name: Install cask community.general.homebrew_cask: name: alfred @@ -123,6 +125,11 @@ EXAMPLES = ''' community.general.homebrew_cask: upgrade_all: true +- name: Upgrade all casks with greedy option + community.general.homebrew_cask: + upgrade_all: true + greedy: true + - name: Upgrade given cask with force option community.general.homebrew_cask: name: alfred @@ -140,17 +147,17 @@ EXAMPLES = ''' name: wireshark state: present sudo_password: "{{ ansible_become_pass }}" -''' +""" import os import re import tempfile from ansible_collections.community.general.plugins.module_utils.version import LooseVersion +from ansible_collections.community.general.plugins.module_utils.homebrew import HomebrewValidate from ansible.module_utils.common.text.converters import to_bytes from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six import iteritems, string_types # exceptions -------------------------------------------------------------- {{{ @@ -162,7 +169,7 @@ class HomebrewCaskException(Exception): # utils ------------------------------------------------------------------- {{{ def _create_regex_group_complement(s): lines = (line.strip() for line in s.split('\n') if line.strip()) - chars = filter(None, (line.split('#')[0].strip() for line in lines)) + chars = [_f for _f in (line.split('#')[0].strip() for line in lines) if _f] 
group = r'[^' + r''.join(chars) + r']' return re.compile(group) # /utils ------------------------------------------------------------------ }}} @@ -172,83 +179,19 @@ class HomebrewCask(object): '''A class to manage Homebrew casks.''' # class regexes ------------------------------------------------ {{{ - VALID_PATH_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - \s # spaces - : # colons - {sep} # the OS-specific path separator - . # dots - \- # dashes - '''.format(sep=os.path.sep) - - VALID_BREW_PATH_CHARS = r''' - \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) - \s # spaces - {sep} # the OS-specific path separator - . # dots - \- # dashes - '''.format(sep=os.path.sep) - VALID_CASK_CHARS = r''' \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) . # dots / # slash (for taps) \- # dashes @ # at symbol + \+ # plus symbol ''' - INVALID_PATH_REGEX = _create_regex_group_complement(VALID_PATH_CHARS) - INVALID_BREW_PATH_REGEX = _create_regex_group_complement(VALID_BREW_PATH_CHARS) INVALID_CASK_REGEX = _create_regex_group_complement(VALID_CASK_CHARS) # /class regexes ----------------------------------------------- }}} # class validations -------------------------------------------- {{{ - @classmethod - def valid_path(cls, path): - ''' - `path` must be one of: - - list of paths - - a string containing only: - - alphanumeric characters - - dashes - - dots - - spaces - - colons - - os.path.sep - ''' - - if isinstance(path, (string_types)): - return not cls.INVALID_PATH_REGEX.search(path) - - try: - iter(path) - except TypeError: - return False - else: - paths = path - return all(cls.valid_brew_path(path_) for path_ in paths) - - @classmethod - def valid_brew_path(cls, brew_path): - ''' - `brew_path` must be one of: - - None - - a string containing only: - - alphanumeric characters - - dashes - - dots - - spaces - - os.path.sep - ''' - - if brew_path is None: - return True - - return ( - isinstance(brew_path, string_types) - and not 
cls.INVALID_BREW_PATH_REGEX.search(brew_path) - ) - @classmethod def valid_cask(cls, cask): '''A valid cask is either None or alphanumeric + backslashes.''' @@ -257,7 +200,7 @@ class HomebrewCask(object): return True return ( - isinstance(cask, string_types) + isinstance(cask, str) and not cls.INVALID_CASK_REGEX.search(cask) ) @@ -273,7 +216,7 @@ class HomebrewCask(object): return True else: return ( - isinstance(state, string_types) + isinstance(state, str) and state.lower() in ( 'installed', 'absent', @@ -310,14 +253,14 @@ class HomebrewCask(object): @path.setter def path(self, path): - if not self.valid_path(path): + if not HomebrewValidate.valid_path(path): self._path = [] self.failed = True self.message = 'Invalid path: {0}.'.format(path) raise HomebrewCaskException(self.message) else: - if isinstance(path, string_types): + if isinstance(path, str): self._path = path.split(':') else: self._path = path @@ -330,7 +273,7 @@ class HomebrewCask(object): @brew_path.setter def brew_path(self, brew_path): - if not self.valid_brew_path(brew_path): + if not HomebrewValidate.valid_brew_path(brew_path): self._brew_path = None self.failed = True self.message = 'Invalid brew_path: {0}.'.format(brew_path) @@ -404,7 +347,7 @@ class HomebrewCask(object): self.message = '' def _setup_instance_vars(self, **kwargs): - for key, val in iteritems(kwargs): + for key, val in kwargs.items(): setattr(self, key, val) def _prep(self): @@ -479,10 +422,7 @@ class HomebrewCask(object): cmd = base_opts + [self.current_cask] rc, out, err = self.module.run_command(cmd) - if rc == 0: - return True - else: - return False + return rc == 0 def _get_brew_version(self): if self.brew_version: @@ -490,11 +430,13 @@ class HomebrewCask(object): cmd = [self.brew_path, '--version'] - rc, out, err = self.module.run_command(cmd, check_rc=True) + dummy, out, dummy = self.module.run_command(cmd, check_rc=True) - # get version string from first line of "brew --version" output - version = 
out.split('\n')[0].split(' ')[1] - self.brew_version = version + pattern = r"Homebrew (.*)(\d+\.\d+\.\d+)(-dirty)?" + rematch = re.search(pattern, out) + if not rematch: + self.module.fail_json(msg="Failed to match regex to get brew version", stdout=out) + self.brew_version = rematch.groups()[1] return self.brew_version def _brew_cask_command_is_deprecated(self): @@ -545,7 +487,7 @@ class HomebrewCask(object): 'update', ]) if rc == 0: - if out and isinstance(out, string_types): + if out and isinstance(out, str): already_updated = any( re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE) for s in out.split('\n') @@ -576,6 +518,9 @@ class HomebrewCask(object): else: cmd = [self.brew_path, 'cask', 'upgrade'] + if self.greedy: + cmd = cmd + ['--greedy'] + rc, out, err = '', '', '' if self.sudo_password: @@ -584,7 +529,12 @@ class HomebrewCask(object): rc, out, err = self.module.run_command(cmd) if rc == 0: - if re.search(r'==> No Casks to upgrade', out.strip(), re.IGNORECASE): + # 'brew upgrade --cask' does not output anything if no casks are upgraded + if not out.strip(): + self.message = 'Homebrew casks already upgraded.' + + # handle legacy 'brew cask upgrade' + elif re.search(r'==> No Casks to upgrade', out.strip(), re.IGNORECASE): self.message = 'Homebrew casks already upgraded.' 
else: @@ -781,13 +731,11 @@ def main(): argument_spec=dict( name=dict( aliases=["pkg", "package", "cask"], - required=False, type='list', elements='str', ), path=dict( default="/usr/local/bin:/opt/homebrew/bin", - required=False, type='path', ), state=dict( @@ -800,7 +748,6 @@ def main(): ), sudo_password=dict( type="str", - required=False, no_log=True, ), update_homebrew=dict( @@ -808,7 +755,6 @@ def main(): type='bool', ), install_options=dict( - default=None, aliases=['options'], type='list', elements='str', diff --git a/plugins/modules/homebrew_services.py b/plugins/modules/homebrew_services.py new file mode 100644 index 0000000000..5527aae133 --- /dev/null +++ b/plugins/modules/homebrew_services.py @@ -0,0 +1,251 @@ +#!/usr/bin/python + +# Copyright (c) 2013, Andrew Dunham +# Copyright (c) 2013, Daniel Jaouen +# Copyright (c) 2015, Indrajit Raychaudhuri +# Copyright (c) 2024, Kit Ham +# +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import annotations + +DOCUMENTATION = r""" +module: homebrew_services +author: + - "Kit Ham (@kitizz)" +requirements: + - homebrew must already be installed on the target system +short_description: Services manager for Homebrew +version_added: 9.3.0 +description: + - Manages daemons and services using Homebrew. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none +options: + name: + description: + - An installed homebrew package whose service is to be updated. + aliases: ['formula'] + type: str + required: true + path: + description: + - A V(:) separated list of paths to search for C(brew) executable. 
Since a package (I(formula) in homebrew parlance) + location is prefixed relative to the actual path of C(brew) command, providing an alternative C(brew) path enables + managing different set of packages in an alternative location in the system. + default: '/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin' + type: path + state: + description: + - State of the package's service. + choices: ['present', 'absent', 'restarted'] + default: present + type: str +""" + +EXAMPLES = r""" +- name: Install foo package + community.general.homebrew: + name: foo + state: present + +- name: Start the foo service (equivalent to `brew services start foo`) + community.general.homebrew_services: + name: foo + state: present + +- name: Restart the foo service (equivalent to `brew services restart foo`) + community.general.homebrew_services: + name: foo + state: restarted + +- name: Remove the foo service (equivalent to `brew services stop foo`) + community.general.homebrew_services: + name: foo + state: absent +""" + +RETURN = r""" +pid: + description: + - If the service is now running, this is the PID of the service, otherwise -1. + returned: success + type: int + sample: 1234 +running: + description: + - Whether the service is running after running this command. + returned: success + type: bool + sample: true +""" + +import json +import sys + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils.homebrew import ( + HomebrewValidate, + parse_brew_path, +) + +if sys.version_info < (3, 5): + from collections import namedtuple + + # Stores validated arguments for an instance of an action. + # See DOCUMENTATION string for argument-specific information. + HomebrewServiceArgs = namedtuple( + "HomebrewServiceArgs", ["name", "state", "brew_path"] + ) + + # Stores the state of a Homebrew service. 
+ HomebrewServiceState = namedtuple("HomebrewServiceState", ["running", "pid"]) + +else: + from typing import NamedTuple, Optional + + # Stores validated arguments for an instance of an action. + # See DOCUMENTATION string for argument-specific information. + HomebrewServiceArgs = NamedTuple( + "HomebrewServiceArgs", [("name", str), ("state", str), ("brew_path", str)] + ) + + # Stores the state of a Homebrew service. + HomebrewServiceState = NamedTuple( + "HomebrewServiceState", [("running", bool), ("pid", Optional[int])] + ) + + +def _brew_service_state(args, module): + # type: (HomebrewServiceArgs, AnsibleModule) -> HomebrewServiceState + cmd = [args.brew_path, "services", "info", args.name, "--json"] + rc, stdout, stderr = module.run_command(cmd, check_rc=True) + + try: + data = json.loads(stdout)[0] + except json.JSONDecodeError: + module.fail_json(msg="Failed to parse JSON output:\n{0}".format(stdout)) + + return HomebrewServiceState(running=data["status"] == "started", pid=data["pid"]) + + +def _exit_with_state(args, module, changed=False, message=None): + # type: (HomebrewServiceArgs, AnsibleModule, bool, Optional[str]) -> None + state = _brew_service_state(args, module) + if message is None: + message = ( + "Running: {state.running}, Changed: {changed}, PID: {state.pid}".format( + state=state, changed=changed + ) + ) + module.exit_json(msg=message, pid=state.pid, running=state.running, changed=changed) + + +def validate_and_load_arguments(module): + # type: (AnsibleModule) -> HomebrewServiceArgs + """Reuse the Homebrew module's validation logic to validate these arguments.""" + package = module.params["name"] # type: ignore + if not HomebrewValidate.valid_package(package): + module.fail_json(msg="Invalid package name: {0}".format(package)) + + state = module.params["state"] # type: ignore + if state not in ["present", "absent", "restarted"]: + module.fail_json(msg="Invalid state: {0}".format(state)) + + brew_path = parse_brew_path(module) + + return 
HomebrewServiceArgs(name=package, state=state, brew_path=brew_path) + + +def start_service(args, module): + # type: (HomebrewServiceArgs, AnsibleModule) -> None + """Start the requested brew service if it is not already running.""" + state = _brew_service_state(args, module) + if state.running: + # Nothing to do, return early. + _exit_with_state(args, module, changed=False, message="Service already running") + + if module.check_mode: + _exit_with_state(args, module, changed=True, message="Service would be started") + + start_cmd = [args.brew_path, "services", "start", args.name] + rc, stdout, stderr = module.run_command(start_cmd, check_rc=True) + + _exit_with_state(args, module, changed=True) + + +def stop_service(args, module): + # type: (HomebrewServiceArgs, AnsibleModule) -> None + """Stop the requested brew service if it is running.""" + state = _brew_service_state(args, module) + if not state.running: + # Nothing to do, return early. + _exit_with_state(args, module, changed=False, message="Service already stopped") + + if module.check_mode: + _exit_with_state(args, module, changed=True, message="Service would be stopped") + + stop_cmd = [args.brew_path, "services", "stop", args.name] + rc, stdout, stderr = module.run_command(stop_cmd, check_rc=True) + + _exit_with_state(args, module, changed=True) + + +def restart_service(args, module): + # type: (HomebrewServiceArgs, AnsibleModule) -> None + """Restart the requested brew service. 
This always results in a change.""" + if module.check_mode: + _exit_with_state( + args, module, changed=True, message="Service would be restarted" + ) + + restart_cmd = [args.brew_path, "services", "restart", args.name] + rc, stdout, stderr = module.run_command(restart_cmd, check_rc=True) + + _exit_with_state(args, module, changed=True) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict( + aliases=["formula"], + required=True, + type="str", + ), + state=dict( + choices=["present", "absent", "restarted"], + default="present", + ), + path=dict( + default="/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin", + type="path", + ), + ), + supports_check_mode=True, + ) + + module.run_command_environ_update = dict( + LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C" + ) + + # Pre-validate arguments. + service_args = validate_and_load_arguments(module) + + # Choose logic based on the desired state. + if service_args.state == "present": + start_service(service_args, module) + elif service_args.state == "absent": + stop_service(service_args, module) + elif service_args.state == "restarted": + restart_service(service_args, module) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/homebrew_tap.py b/plugins/modules/homebrew_tap.py index 7099773b21..813b89db44 100644 --- a/plugins/modules/homebrew_tap.py +++ b/plugins/modules/homebrew_tap.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2013, Daniel Jaouen # Copyright (c) 2016, Indrajit Raychaudhuri @@ -9,53 +8,56 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: homebrew_tap author: - - "Indrajit Raychaudhuri (@indrajitr)" - - "Daniel Jaouen 
(@danieljaouen)" + - "Indrajit Raychaudhuri (@indrajitr)" + - "Daniel Jaouen (@danieljaouen)" short_description: Tap a Homebrew repository description: - - Tap external Homebrew repositories. + - Tap external Homebrew repositories. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - The GitHub user/organization repository to tap. - required: true - aliases: ['tap'] - type: list - elements: str - url: - description: - - The optional git URL of the repository to tap. The URL is not - assumed to be on GitHub, and the protocol doesn't have to be HTTP. - Any location and protocol that git can handle is fine. - - I(name) option may not be a list of multiple taps (but a single - tap instead) when this option is provided. - required: false - type: str - state: - description: - - state of the repository. - choices: [ 'present', 'absent' ] - required: false - default: 'present' - type: str - path: - description: - - "A C(:) separated list of paths to search for C(brew) executable." - default: '/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin' - type: path - version_added: '2.1.0' -requirements: [ homebrew ] -''' + name: + description: + - The GitHub user/organization repository to tap. + required: true + aliases: ['tap'] + type: list + elements: str + url: + description: + - The optional git URL of the repository to tap. The URL is not assumed to be on GitHub, and the protocol does not have + to be HTTP. Any location and protocol that git can handle is fine. + - O(name) option may not be a list of multiple taps (but a single tap instead) when this option is provided. + required: false + type: str + state: + description: + - State of the repository. + choices: ['present', 'absent'] + required: false + default: 'present' + type: str + path: + description: + - A V(:) separated list of paths to search for C(brew) executable. 
+ default: '/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin' + type: path + version_added: '2.1.0' +requirements: [homebrew] +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Tap a Homebrew repository, state present community.general.homebrew_tap: name: homebrew/dupes @@ -74,7 +76,7 @@ EXAMPLES = r''' community.general.homebrew_tap: name: telemachus/brew url: 'https://bitbucket.org/telemachus/brew' -''' +""" import re @@ -216,11 +218,10 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict(aliases=['tap'], type='list', required=True, elements='str'), - url=dict(default=None, required=False), + url=dict(), state=dict(default='present', choices=['present', 'absent']), path=dict( default="/usr/local/bin:/opt/homebrew/bin:/home/linuxbrew/.linuxbrew/bin", - required=False, type='path', ), ), diff --git a/plugins/modules/homectl.py b/plugins/modules/homectl.py index c14dc4af9a..90e97fc484 100644 --- a/plugins/modules/homectl.py +++ b/plugins/modules/homectl.py @@ -1,180 +1,190 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2022, James Livulpi # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: homectl author: - - "James Livulpi (@jameslivulpi)" + - "James Livulpi (@jameslivulpi)" short_description: Manage user accounts with systemd-homed version_added: 4.4.0 description: - - Manages a user's home directory managed by systemd-homed. + - Manages a user's home directory managed by systemd-homed. +notes: + - This module requires the deprecated L(crypt Python module, https://docs.python.org/3.12/library/crypt.html) library which + was removed from Python 3.13. 
For Python 3.13 or newer, you need to install L(legacycrypt, https://pypi.org/project/legacycrypt/). +requirements: + - legacycrypt (on Python 3.13 or newer) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - description: - - The user name to create, remove, or update. - required: true - aliases: [ 'user', 'username' ] - type: str - password: - description: - - Set the user's password to this. - - Homed requires this value to be in cleartext on user creation and updating a user. - - The module takes the password and generates a password hash in SHA-512 with 10000 rounds of salt generation using crypt. - - See U(https://systemd.io/USER_RECORD/). - - This is required for I(state=present). When an existing user is updated this is checked against the stored hash in homed. - type: str - state: - description: - - The operation to take on the user. - choices: [ 'absent', 'present' ] - default: present - type: str - storage: - description: - - Indicates the storage mechanism for the user's home directory. - - If the storage type is not specified, ``homed.conf(5)`` defines which default storage to use. - - Only used when a user is first created. - choices: [ 'classic', 'luks', 'directory', 'subvolume', 'fscrypt', 'cifs' ] - type: str - disksize: - description: - - The intended home directory disk space. - - Human readable value such as C(10G), C(10M), or C(10B). - type: str - resize: - description: - - When used with I(disksize) this will attempt to resize the home directory immediately. - default: false - type: bool - realname: - description: - - The user's real ('human') name. - - This can also be used to add a comment to maintain compatibility with C(useradd). - aliases: [ 'comment' ] - type: str - realm: - description: - - The 'realm' a user is defined in. - type: str - email: - description: - - The email address of the user. 
- type: str - location: - description: - - A free-form location string describing the location of the user. - type: str - iconname: - description: - - The name of an icon picked by the user, for example for the purpose of an avatar. - - Should follow the semantics defined in the Icon Naming Specification. - - See U(https://specifications.freedesktop.org/icon-naming-spec/icon-naming-spec-latest.html) for specifics. - type: str - homedir: - description: - - Path to use as home directory for the user. - - This is the directory the user's home directory is mounted to while the user is logged in. - - This is not where the user's data is actually stored, see I(imagepath) for that. - - Only used when a user is first created. - type: path - imagepath: - description: - - Path to place the user's home directory. - - See U(https://www.freedesktop.org/software/systemd/man/homectl.html#--image-path=PATH) for more information. - - Only used when a user is first created. - type: path - uid: - description: - - Sets the UID of the user. - - If using I(gid) homed requires the value to be the same. - - Only used when a user is first created. - type: int - gid: - description: - - Sets the gid of the user. - - If using I(uid) homed requires the value to be the same. - - Only used when a user is first created. - type: int - mountopts: - description: - - String separated by comma each indicating mount options for a users home directory. - - Valid options are C(nosuid), C(nodev) or C(noexec). - - Homed by default uses C(nodev) and C(nosuid) while C(noexec) is off. - type: str - umask: - description: - - Sets the umask for the user's login sessions - - Value from C(0000) to C(0777). - type: int - memberof: - description: - - String separated by comma each indicating a UNIX group this user shall be a member of. - - Groups the user should be a member of should be supplied as comma separated list. 
- aliases: [ 'groups' ] - type: str - skeleton: - description: - - The absolute path to the skeleton directory to populate a new home directory from. - - This is only used when a home directory is first created. - - If not specified homed by default uses C(/etc/skel). - aliases: [ 'skel' ] - type: path - shell: - description: - - Shell binary to use for terminal logins of given user. - - If not specified homed by default uses C(/bin/bash). - type: str - environment: - description: - - String separated by comma each containing an environment variable and its value to - set for the user's login session, in a format compatible with ``putenv()``. - - Any environment variable listed here is automatically set by pam_systemd for all - login sessions of the user. - aliases: [ 'setenv' ] - type: str - timezone: - description: - - Preferred timezone to use for the user. - - Should be a tzdata compatible location string such as C(America/New_York). - type: str - locked: - description: - - Whether the user account should be locked or not. - type: bool - language: - description: - - The preferred language/locale for the user. - - This should be in a format compatible with the C($LANG) environment variable. - type: str - passwordhint: - description: - - Password hint for the given user. - type: str - sshkeys: - description: - - String separated by comma each listing a SSH public key that is authorized to access the account. - - The keys should follow the same format as the lines in a traditional C(~/.ssh/authorized_key) file. - type: str - notbefore: - description: - - A time since the UNIX epoch before which the record should be considered invalid for the purpose of logging in. - type: int - notafter: - description: - - A time since the UNIX epoch after which the record should be considered invalid for the purpose of logging in. - type: int -''' + name: + description: + - The user name to create, remove, or update. 
+ required: true + aliases: ['user', 'username'] + type: str + password: + description: + - Set the user's password to this. + - Homed requires this value to be in cleartext on user creation and updating a user. + - The module takes the password and generates a password hash in SHA-512 with 10000 rounds of salt generation using + crypt. + - See U(https://systemd.io/USER_RECORD/). + - This is required for O(state=present). When an existing user is updated this is checked against the stored hash in + homed. + type: str + state: + description: + - The operation to take on the user. + choices: ['absent', 'present'] + default: present + type: str + storage: + description: + - Indicates the storage mechanism for the user's home directory. + - If the storage type is not specified, C(homed.conf(5\)) defines which default storage to use. + - Only used when a user is first created. + choices: ['classic', 'luks', 'directory', 'subvolume', 'fscrypt', 'cifs'] + type: str + disksize: + description: + - The intended home directory disk space. + - Human readable value such as V(10G), V(10M), or V(10B). + type: str + resize: + description: + - When used with O(disksize) this attempts to resize the home directory immediately. + default: false + type: bool + realname: + description: + - The user's real ('human') name. + - This can also be used to add a comment to maintain compatibility with C(useradd). + aliases: ['comment'] + type: str + realm: + description: + - The 'realm' a user is defined in. + type: str + email: + description: + - The email address of the user. + type: str + location: + description: + - A free-form location string describing the location of the user. + type: str + iconname: + description: + - The name of an icon picked by the user, for example for the purpose of an avatar. + - Should follow the semantics defined in the Icon Naming Specification. + - See U(https://specifications.freedesktop.org/icon-naming-spec/icon-naming-spec-latest.html) for specifics. 
+ type: str + homedir: + description: + - Path to use as home directory for the user. + - This is the directory the user's home directory is mounted to while the user is logged in. + - This is not where the user's data is actually stored, see O(imagepath) for that. + - Only used when a user is first created. + type: path + imagepath: + description: + - Path to place the user's home directory. + - See U(https://www.freedesktop.org/software/systemd/man/homectl.html#--image-path=PATH) for more information. + - Only used when a user is first created. + type: path + uid: + description: + - Sets the UID of the user. + - If using O(gid) homed requires the value to be the same. + - Only used when a user is first created. + type: int + gid: + description: + - Sets the gid of the user. + - If using O(uid) homed requires the value to be the same. + - Only used when a user is first created. + type: int + mountopts: + description: + - String separated by comma each indicating mount options for a users home directory. + - Valid options are V(nosuid), V(nodev) or V(noexec). + - Homed by default uses V(nodev) and V(nosuid) while V(noexec) is off. + type: str + umask: + description: + - Sets the umask for the user's login sessions. + - Value from V(0000) to V(0777). + type: int + memberof: + description: + - String separated by comma each indicating a UNIX group this user shall be a member of. + - Groups the user should be a member of should be supplied as comma separated list. + aliases: ['groups'] + type: str + skeleton: + description: + - The absolute path to the skeleton directory to populate a new home directory from. + - This is only used when a home directory is first created. + - If not specified homed by default uses V(/etc/skel). + aliases: ['skel'] + type: path + shell: + description: + - Shell binary to use for terminal logins of given user. + - If not specified homed by default uses V(/bin/bash). 
+ type: str + environment: + description: + - String separated by comma each containing an environment variable and its value to set for the user's login session, + in a format compatible with C(putenv(\)). + - Any environment variable listed here is automatically set by pam_systemd for all login sessions of the user. + aliases: ['setenv'] + type: str + timezone: + description: + - Preferred timezone to use for the user. + - Should be a tzdata compatible location string such as V(America/New_York). + type: str + locked: + description: + - Whether the user account should be locked or not. + type: bool + language: + description: + - The preferred language/locale for the user. + - This should be in a format compatible with the E(LANG) environment variable. + type: str + passwordhint: + description: + - Password hint for the given user. + type: str + sshkeys: + description: + - String separated by comma each listing a SSH public key that is authorized to access the account. + - The keys should follow the same format as the lines in a traditional C(~/.ssh/authorized_key) file. + type: str + notbefore: + description: + - A time since the UNIX epoch before which the record should be considered invalid for the purpose of logging in. + type: int + notafter: + description: + - A time since the UNIX epoch after which the record should be considered invalid for the purpose of logging in. + type: int +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Add the user 'james' community.general.homectl: name: johnd @@ -202,70 +212,89 @@ EXAMPLES = ''' community.general.homectl: name: janet state: absent -''' +""" -RETURN = ''' +RETURN = r""" data: - description: A json dictionary returned from C(homectl inspect -j). 
- returned: success - type: dict - sample: { - "data": { - "binding": { - "e9ed2a5b0033427286b228e97c1e8343": { - "fileSystemType": "btrfs", - "fileSystemUuid": "7bd59491-2812-4642-a492-220c3f0c6c0b", - "gid": 60268, - "imagePath": "/home/james.home", - "luksCipher": "aes", - "luksCipherMode": "xts-plain64", - "luksUuid": "7f05825a-2c38-47b4-90e1-f21540a35a81", - "luksVolumeKeySize": 32, - "partitionUuid": "5a906126-d3c8-4234-b230-8f6e9b427b2f", - "storage": "luks", - "uid": 60268 - } - }, + description: Dictionary returned from C(homectl inspect -j). + returned: success + type: dict + sample: + { + "data": { + "binding": { + "e9ed2a5b0033427286b228e97c1e8343": { + "fileSystemType": "btrfs", + "fileSystemUuid": "7bd59491-2812-4642-a492-220c3f0c6c0b", + "gid": 60268, + "imagePath": "/home/james.home", + "luksCipher": "aes", + "luksCipherMode": "xts-plain64", + "luksUuid": "7f05825a-2c38-47b4-90e1-f21540a35a81", + "luksVolumeKeySize": 32, + "partitionUuid": "5a906126-d3c8-4234-b230-8f6e9b427b2f", + "storage": "luks", + "uid": 60268 + } + }, + "diskSize": 3221225472, + "disposition": "regular", + "lastChangeUSec": 1641941238208691, + "lastPasswordChangeUSec": 1641941238208691, + "privileged": { + "hashedPassword": [ + "$6$ov9AKni.trf76inT$tTtfSyHgbPTdUsG0CvSSQZXGqFGdHKQ9Pb6e0BTZhDmlgrL/vA5BxrXduBi8u/PCBiYUffGLIkGhApjKMK3bV." 
+ ] + }, + "signature": [ + { + "data": "o6zVFbymcmk4YTVaY6KPQK23YCp+VkXdGEeniZeV1pzIbFzoaZBvVLPkNKMoPAQbodY5BYfBtuy41prNL78qAg==", + "key": "-----BEGIN PUBLIC KEY-----\nMCowBQYDK2VwAyEAbs7ELeiEYBxkUQhxZ+5NGyu6J7gTtZtZ5vmIw3jowcY=\n-----END PUBLIC KEY-----\n" + } + ], + "status": { + "e9ed2a5b0033427286b228e97c1e8343": { + "diskCeiling": 21845405696, + "diskFloor": 268435456, "diskSize": 3221225472, - "disposition": "regular", - "lastChangeUSec": 1641941238208691, - "lastPasswordChangeUSec": 1641941238208691, - "privileged": { - "hashedPassword": [ - "$6$ov9AKni.trf76inT$tTtfSyHgbPTdUsG0CvSSQZXGqFGdHKQ9Pb6e0BTZhDmlgrL/vA5BxrXduBi8u/PCBiYUffGLIkGhApjKMK3bV." - ] - }, - "signature": [ - { - "data": "o6zVFbymcmk4YTVaY6KPQK23YCp+VkXdGEeniZeV1pzIbFzoaZBvVLPkNKMoPAQbodY5BYfBtuy41prNL78qAg==", - "key": "-----BEGIN PUBLIC KEY-----\nMCowBQYDK2VwAyEAbs7ELeiEYBxkUQhxZ+5NGyu6J7gTtZtZ5vmIw3jowcY=\n-----END PUBLIC KEY-----\n" - } - ], - "status": { - "e9ed2a5b0033427286b228e97c1e8343": { - "diskCeiling": 21845405696, - "diskFloor": 268435456, - "diskSize": 3221225472, - "service": "io.systemd.Home", - "signedLocally": true, - "state": "inactive" - } - }, - "userName": "james", - } + "service": "io.systemd.Home", + "signedLocally": true, + "state": "inactive" + } + }, + "userName": "james" + } } -''' +""" -import crypt import json -from ansible.module_utils.basic import AnsibleModule +import traceback +from ansible.module_utils.basic import AnsibleModule, missing_required_lib from ansible.module_utils.basic import jsonify from ansible.module_utils.common.text.formatters import human_to_bytes +try: + import crypt +except ImportError: + HAS_CRYPT = False + CRYPT_IMPORT_ERROR = traceback.format_exc() +else: + HAS_CRYPT = True + CRYPT_IMPORT_ERROR = None + +try: + import legacycrypt + if not HAS_CRYPT: + crypt = legacycrypt +except ImportError: + HAS_LEGACYCRYPT = False + LEGACYCRYPT_IMPORT_ERROR = traceback.format_exc() +else: + HAS_LEGACYCRYPT = True + LEGACYCRYPT_IMPORT_ERROR = 
None + class Homectl(object): - '''#TODO DOC STRINGS''' - def __init__(self, module): self.module = module self.state = module.params['state'] @@ -386,7 +415,7 @@ class Homectl(object): user_metadata.pop('status', None) # Let last change Usec be updated by homed when command runs. user_metadata.pop('lastChangeUSec', None) - # Now only change fields that are called on leaving whats currently in the record intact. + # Now only change fields that are called on leaving what's currently in the record intact. record = user_metadata record['userName'] = self.name @@ -432,7 +461,7 @@ class Homectl(object): self.result['changed'] = True if self.disksize: - # convert humand readble to bytes + # convert human readable to bytes if self.disksize != record.get('diskSize'): record['diskSize'] = human_to_bytes(self.disksize) self.result['changed'] = True @@ -584,6 +613,12 @@ def main(): ] ) + if not HAS_CRYPT and not HAS_LEGACYCRYPT: + module.fail_json( + msg=missing_required_lib('crypt (part of standard library up to Python 3.12) or legacycrypt (PyPI)'), + exception=CRYPT_IMPORT_ERROR, + ) + homectl = Homectl(module) homectl.result['state'] = homectl.state diff --git a/plugins/modules/honeybadger_deployment.py b/plugins/modules/honeybadger_deployment.py index be8412f9af..a5fe8c86f7 100644 --- a/plugins/modules/honeybadger_deployment.py +++ b/plugins/modules/honeybadger_deployment.py @@ -1,20 +1,24 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright 2014 Benjamin Curtis # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: honeybadger_deployment author: "Benjamin Curtis (@stympy)" short_description: Notify Honeybadger.io about app deployments description: - - Notify Honeybadger.io about app 
deployments (see http://docs.honeybadger.io/article/188-deployment-tracking) + - Notify Honeybadger.io about app deployments (see U(http://docs.honeybadger.io/article/188-deployment-tracking)). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: token: type: str @@ -24,20 +28,20 @@ options: environment: type: str description: - - The environment name, typically 'production', 'staging', etc. + - The environment name, typically V(production), V(staging), and so on. required: true user: type: str description: - - The username of the person doing the deployment + - The username of the person doing the deployment. repo: type: str description: - - URL of the project repository + - URL of the project repository. revision: type: str description: - - A hash, number, tag, or other identifier showing what revision was deployed + - A hash, number, tag, or other identifier showing what revision was deployed. url: type: str description: @@ -45,14 +49,13 @@ options: default: "https://api.honeybadger.io/v1/deploys" validate_certs: description: - - If C(false), SSL certificates for the target url will not be validated. This should only be used - on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates for the target URL are not validated. This should only be used on personally controlled + sites using self-signed certificates. 
type: bool default: true +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Notify Honeybadger.io about an app deployment community.general.honeybadger_deployment: token: AAAAAA @@ -60,14 +63,14 @@ EXAMPLES = ''' user: ansible revision: b6826b8 repo: 'git@github.com:user/repo.git' -''' +""" -RETURN = '''# ''' +RETURN = """#""" import traceback +from urllib.parse import urlencode from ansible.module_utils.basic import AnsibleModule -from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.urls import fetch_url @@ -83,9 +86,9 @@ def main(): token=dict(required=True, no_log=True), environment=dict(required=True), user=dict(required=False), - repo=dict(required=False), - revision=dict(required=False), - url=dict(required=False, default='https://api.honeybadger.io/v1/deploys'), + repo=dict(), + revision=dict(), + url=dict(default='https://api.honeybadger.io/v1/deploys'), validate_certs=dict(default=True, type='bool'), ), supports_check_mode=True diff --git a/plugins/modules/hpilo_boot.py b/plugins/modules/hpilo_boot.py index f663a7b5f5..bf44a4dac4 100644 --- a/plugins/modules/hpilo_boot.py +++ b/plugins/modules/hpilo_boot.py @@ -1,23 +1,27 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright 2012 Dag Wieers # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hpilo_boot author: Dag Wieers (@dagwieers) short_description: Boot system using specific media through HP iLO interface description: -- "This module boots a system through its HP iLO interface. The boot media - can be one of: cdrom, floppy, hdd, network or usb." -- This module requires the hpilo python module. 
+ - 'This module boots a system through its HP iLO interface. The boot media can be one of: V(cdrom), V(floppy), V(hdd), V(network), + or V(usb).' + - This module requires the hpilo python module. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: host: description: @@ -36,33 +40,32 @@ options: type: str media: description: - - The boot media to boot the system from - choices: [ "cdrom", "floppy", "rbsu", "hdd", "network", "normal", "usb" ] + - The boot media to boot the system from. + choices: ["cdrom", "floppy", "rbsu", "hdd", "network", "normal", "usb"] type: str image: description: - - The URL of a cdrom, floppy or usb boot media image. - protocol://username:password@hostname:port/filename - - protocol is either 'http' or 'https' - - username:password is optional - - port is optional + - The URL of a cdrom, floppy or usb boot media image in the form V(protocol://username:password@hostname:port/filename). + - V(protocol) is either V(http) or V(https). + - V(username:password) is optional. + - V(port) is optional. type: str state: description: - The state of the boot media. - - "no_boot: Do not boot from the device" - - "boot_once: Boot from the device once and then notthereafter" - - "boot_always: Boot from the device each time the server is rebooted" - - "connect: Connect the virtual media device and set to boot_always" - - "disconnect: Disconnects the virtual media device and set to no_boot" - - "poweroff: Power off the server" + - 'V(no_boot): Do not boot from the device.' + - 'V(boot_once): Boot from the device once and then notthereafter.' + - 'V(boot_always): Boot from the device each time the server is rebooted.' + - 'V(connect): Connect the virtual media device and set to boot_always.' + - 'V(disconnect): Disconnects the virtual media device and set to no_boot.' + - 'V(poweroff): Power off the server.' 
default: boot_once type: str - choices: [ "boot_always", "boot_once", "connect", "disconnect", "no_boot", "poweroff" ] + choices: ["boot_always", "boot_once", "connect", "disconnect", "no_boot", "poweroff"] force: description: - - Whether to force a reboot (even when the system is already booted). - - As a safeguard, without force, hpilo_boot will refuse to reboot a server that is already running. + - Whether to force a reboot (even when the system is already booted). + - As a safeguard, without force, M(community.general.hpilo_boot) refuses to reboot a server that is already running. default: false type: bool ssl_version: @@ -70,16 +73,22 @@ options: - Change the ssl_version used. default: TLSv1 type: str - choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ] + choices: ["SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2"] + idempotent_boot_once: + description: + - This option makes O(state=boot_once) succeed instead of failing when the server is already powered on. + type: bool + default: false + version_added: 10.6.0 requirements: -- python-hpilo + - python-hpilo notes: -- To use a USB key image you need to specify floppy as boot media. -- This module ought to be run from a system that can access the HP iLO - interface directly, either by using C(local_action) or using C(delegate_to). -''' + - To use a USB key image you need to specify floppy as boot media. + - This module ought to be run from a system that can access the HP iLO interface directly, either by using C(local_action) + or using C(delegate_to). 
+""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Task to boot a system using an ISO from an HP iLO interface only if the system is an HP server community.general.hpilo_boot: host: YOUR_ILO_ADDRESS @@ -97,11 +106,11 @@ EXAMPLES = r''' password: YOUR_ILO_PASSWORD state: poweroff delegate_to: localhost -''' +""" -RETURN = ''' +RETURN = r""" # Default return values -''' +""" import time import traceback @@ -133,6 +142,7 @@ def main(): image=dict(type='str'), state=dict(type='str', default='boot_once', choices=['boot_always', 'boot_once', 'connect', 'disconnect', 'no_boot', 'poweroff']), force=dict(type='bool', default=False), + idempotent_boot_once=dict(type='bool', default=False), ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']), ) ) @@ -147,6 +157,7 @@ def main(): image = module.params['image'] state = module.params['state'] force = module.params['force'] + idempotent_boot_once = module.params['idempotent_boot_once'] ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v')) ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version) @@ -182,13 +193,21 @@ def main(): power_status = ilo.get_host_power_status() - if not force and power_status == 'ON': - module.fail_json(msg='HP iLO (%s) reports that the server is already powered on !' % host) - if power_status == 'ON': - ilo.warm_boot_server() -# ilo.cold_boot_server() - changed = True + if not force and not idempotent_boot_once: + # module.deprecate( + # 'The failure of the module when the server is already powered on is being deprecated.' + # ' Please set the parameter "idempotent_boot_once=true" to start using the new behavior.', + # version='11.0.0', + # collection_name='community.general' + # ) + module.fail_json(msg='HP iLO (%s) reports that the server is already powered on !' 
% host) + elif not force and idempotent_boot_once: + pass + elif force: + ilo.warm_boot_server() + # ilo.cold_boot_server() + changed = True else: ilo.press_pwr_btn() # ilo.reset_server() diff --git a/plugins/modules/hpilo_info.py b/plugins/modules/hpilo_info.py index cef6597e48..8f2739180d 100644 --- a/plugins/modules/hpilo_info.py +++ b/plugins/modules/hpilo_info.py @@ -1,33 +1,27 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright 2012 Dag Wieers # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: hpilo_info author: Dag Wieers (@dagwieers) short_description: Gather information through an HP iLO interface description: -- This module gathers information on a specific system using its HP iLO interface. - These information includes hardware and network related information useful - for provisioning (e.g. macaddress, uuid). -- This module requires the C(hpilo) python module. -- This module was called C(hpilo_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.hpilo_info) module no longer returns C(ansible_facts)! + - This module gathers information on a specific system using its HP iLO interface. These information includes hardware and + network related information useful for provisioning (for example macaddress, uuid). + - This module requires the C(hpilo) python module. extends_documentation_fragment: -- community.general.attributes -- community.general.attributes.info_module + - community.general.attributes + - community.general.attributes.info_module options: host: description: - - The HP iLO hostname/address that is linked to the physical system. + - The HP iLO hostname/address that is linked to the physical system. 
type: str required: true login: @@ -45,15 +39,15 @@ options: - Change the ssl_version used. default: TLSv1 type: str - choices: [ "SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" ] + choices: ["SSLv3", "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2"] requirements: -- hpilo + - hpilo notes: -- This module ought to be run from a system that can access the HP iLO - interface directly, either by using C(local_action) or using C(delegate_to). -''' + - This module ought to be run from a system that can access the HP iLO interface directly, either by using C(local_action) + or using C(delegate_to). +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Gather facts from a HP iLO interface only if the system is an HP server community.general.hpilo_info: host: YOUR_ILO_ADDRESS @@ -66,71 +60,71 @@ EXAMPLES = r''' - ansible.builtin.fail: msg: 'CMDB serial ({{ cmdb_serialno }}) does not match hardware serial ({{ results.hw_system_serial }}) !' when: cmdb_serialno != results.hw_system_serial -''' +""" -RETURN = r''' +RETURN = r""" # Typical output of HP iLO_info for a physical system hw_bios_date: - description: BIOS date - returned: always - type: str - sample: 05/05/2011 + description: BIOS date. + returned: always + type: str + sample: 05/05/2011 hw_bios_version: - description: BIOS version - returned: always - type: str - sample: P68 + description: BIOS version. + returned: always + type: str + sample: P68 hw_ethX: - description: Interface information (for each interface) - returned: always - type: dict - sample: - - macaddress: 00:11:22:33:44:55 - macaddress_dash: 00-11-22-33-44-55 + description: Interface information (for each interface). 
+ returned: always + type: dict + sample: + - macaddress: 00:11:22:33:44:55 + macaddress_dash: 00-11-22-33-44-55 hw_eth_ilo: - description: Interface information (for the iLO network interface) - returned: always - type: dict - sample: - - macaddress: 00:11:22:33:44:BA - - macaddress_dash: 00-11-22-33-44-BA + description: Interface information (for the iLO network interface). + returned: always + type: dict + sample: + - macaddress: 00:11:22:33:44:BA + - macaddress_dash: 00-11-22-33-44-BA hw_product_name: - description: Product name - returned: always - type: str - sample: ProLiant DL360 G7 + description: Product name. + returned: always + type: str + sample: ProLiant DL360 G7 hw_product_uuid: - description: Product UUID - returned: always - type: str - sample: ef50bac8-2845-40ff-81d9-675315501dac + description: Product UUID. + returned: always + type: str + sample: ef50bac8-2845-40ff-81d9-675315501dac hw_system_serial: - description: System serial number - returned: always - type: str - sample: ABC12345D6 + description: System serial number. + returned: always + type: str + sample: ABC12345D6 hw_uuid: - description: Hardware UUID - returned: always - type: str - sample: 123456ABC78901D2 + description: Hardware UUID. + returned: always + type: str + sample: 123456ABC78901D2 host_power_status: - description: - - Power status of host. - - Will be one of C(ON), C(OFF) and C(UNKNOWN). - returned: always - type: str - sample: "ON" - version_added: 3.5.0 -''' + description: + - Power status of host. + - It is one of V(ON), V(OFF) and V(UNKNOWN). 
+ returned: always + type: str + sample: "ON" + version_added: 3.5.0 +""" import re import traceback diff --git a/plugins/modules/hponcfg.py b/plugins/modules/hponcfg.py index 65e40c46ed..a17a905916 100644 --- a/plugins/modules/hponcfg.py +++ b/plugins/modules/hponcfg.py @@ -1,50 +1,54 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2012, Dag Wieers # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: hponcfg author: Dag Wieers (@dagwieers) -short_description: Configure HP iLO interface using hponcfg +short_description: Configure HP iLO interface using C(hponcfg) description: - - This modules configures the HP iLO interface using hponcfg. + - This modules configures the HP iLO interface using C(hponcfg). +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: path: description: - - The XML file as accepted by hponcfg. + - The XML file as accepted by C(hponcfg). required: true aliases: ['src'] type: path minfw: description: - - The minimum firmware level needed. + - The minimum firmware level needed. required: false type: str executable: description: - - Path to the hponcfg executable (C(hponcfg) which uses $PATH). + - Path to the hponcfg executable (C(hponcfg) which uses E(PATH)). default: hponcfg type: str verbose: description: - - Run hponcfg in verbose mode (-v). + - Run C(hponcfg) in verbose mode (-v). default: false type: bool requirements: - - hponcfg tool + - hponcfg tool notes: - - You need a working hponcfg on the target system. -''' + - You need a working C(hponcfg) on the target system. 
+""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Example hponcfg configuration XML ansible.builtin.copy: content: | @@ -71,7 +75,7 @@ EXAMPLES = r''' community.general.hponcfg: src: /tmp/enable-ssh.xml executable: /opt/hp/tools/hponcfg -''' +""" from ansible_collections.community.general.plugins.module_utils.cmd_runner import CmdRunner, cmd_runner_fmt from ansible_collections.community.general.plugins.module_utils.module_helper import ModuleHelper diff --git a/plugins/modules/htpasswd.py b/plugins/modules/htpasswd.py index 231506984a..d0e0941601 100644 --- a/plugins/modules/htpasswd.py +++ b/plugins/modules/htpasswd.py @@ -1,77 +1,84 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2013, Nimbis Services, Inc. # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: htpasswd short_description: Manage user files for basic authentication description: - Add and remove username/password entries in a password file using htpasswd. - This is used by web servers such as Apache and Nginx for basic authentication. +attributes: + check_mode: + support: full + diff_mode: + support: none options: path: type: path required: true - aliases: [ dest, destfile ] + aliases: [dest, destfile] description: - - Path to the file that contains the usernames and passwords + - Path to the file that contains the usernames and passwords. name: type: str required: true - aliases: [ username ] + aliases: [username] description: - - User name to add or remove + - User name to add or remove. password: type: str required: false description: - Password associated with user. - Must be specified if user does not exist yet. 
- crypt_scheme: + hash_scheme: type: str required: false default: "apr_md5_crypt" description: - - Encryption scheme to be used. As well as the four choices listed - here, you can also use any other hash supported by passlib, such as - C(portable_apache22) and C(host_apache24); or C(md5_crypt) and C(sha256_crypt), - which are Linux passwd hashes. Only some schemes in addition to - the four choices below will be compatible with Apache or Nginx, and - supported schemes depend on passlib version and its dependencies. + - Hashing scheme to be used. As well as the four choices listed here, you can also use any other hash supported by passlib, + such as V(portable_apache22) and V(host_apache24); or V(md5_crypt) and V(sha256_crypt), which are Linux passwd hashes. + Only some schemes in addition to the four choices below are compatible with Apache or Nginx, and supported schemes + depend on C(passlib) version and its dependencies. - See U(https://passlib.readthedocs.io/en/stable/lib/passlib.apache.html#passlib.apache.HtpasswdFile) parameter C(default_scheme). - - 'Some of the available choices might be: C(apr_md5_crypt), C(des_crypt), C(ldap_sha1), C(plaintext).' + - 'Some of the available choices might be: V(apr_md5_crypt), V(des_crypt), V(ldap_sha1), V(plaintext).' + - 'B(WARNING): The module has no mechanism to determine the O(hash_scheme) of an existing entry, therefore, it does + not detect whether the O(hash_scheme) has changed. If you want to change the scheme, you must remove the existing + entry and then create a new one using the new scheme.' + aliases: [crypt_scheme] state: type: str required: false - choices: [ present, absent ] + choices: [present, absent] default: "present" description: - - Whether the user entry should be present or not + - Whether the user entry should be present or not. create: required: false type: bool default: true description: - - Used with I(state=present). If specified, the file will be created - if it does not already exist. 
If set to C(false), will fail if the - file does not exist + - Used with O(state=present). If V(true), the file is created if it does not exist. Conversely, if set to V(false) and + the file does not exist, it fails. notes: - - "This module depends on the I(passlib) Python library, which needs to be installed on all target systems." - - "On Debian, Ubuntu, or Fedora: install I(python-passlib)." - - "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)." -requirements: [ passlib>=1.6 ] + - This module depends on the C(passlib) Python library, which needs to be installed on all target systems. + - 'On Debian < 11, Ubuntu <= 20.04, or Fedora: install C(python-passlib).' + - 'On Debian, Ubuntu: install C(python3-passlib).' + - 'On RHEL or CentOS: Enable EPEL, then install C(python-passlib).' +requirements: [passlib>=1.6] author: "Ansible Core Team" -extends_documentation_fragment: files -''' +extends_documentation_fragment: + - files + - community.general.attributes +""" -EXAMPLES = """ +EXAMPLES = r""" - name: Add a user to a password file and ensure permissions are set community.general.htpasswd: path: /etc/nginx/passwdfile @@ -79,7 +86,7 @@ EXAMPLES = """ password: '9s36?;fyNp' owner: root group: www-data - mode: 0640 + mode: '0640' - name: Remove a user from a password file community.general.htpasswd: @@ -92,28 +99,22 @@ EXAMPLES = """ path: /etc/mail/passwords name: alex password: oedu2eGh - crypt_scheme: md5_crypt + hash_scheme: md5_crypt """ import os import tempfile -import traceback -from ansible.module_utils.basic import AnsibleModule, missing_required_lib + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.general.plugins.module_utils import deps from ansible.module_utils.common.text.converters import to_native -from ansible_collections.community.general.plugins.module_utils.version import LooseVersion -PASSLIB_IMP_ERR = None -try: +with deps.declare("passlib"): from passlib.apache import HtpasswdFile, 
htpasswd_context from passlib.context import CryptContext - import passlib -except ImportError: - PASSLIB_IMP_ERR = traceback.format_exc() - passlib_installed = False -else: - passlib_installed = True + apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"] @@ -124,50 +125,34 @@ def create_missing_directories(dest): os.makedirs(destpath) -def present(dest, username, password, crypt_scheme, create, check_mode): +def present(dest, username, password, hash_scheme, create, check_mode): """ Ensures user is present Returns (msg, changed) """ - if crypt_scheme in apache_hashes: + if hash_scheme in apache_hashes: context = htpasswd_context else: - context = CryptContext(schemes=[crypt_scheme] + apache_hashes) + context = CryptContext(schemes=[hash_scheme] + apache_hashes) if not os.path.exists(dest): if not create: raise ValueError('Destination %s does not exist' % dest) if check_mode: return ("Create %s" % dest, True) create_missing_directories(dest) - if LooseVersion(passlib.__version__) >= LooseVersion('1.6'): - ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context) - else: - ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context) - if getattr(ht, 'set_password', None): - ht.set_password(username, password) - else: - ht.update(username, password) + ht = HtpasswdFile(dest, new=True, default_scheme=hash_scheme, context=context) + ht.set_password(username, password) ht.save() return ("Created %s and added %s" % (dest, username), True) else: - if LooseVersion(passlib.__version__) >= LooseVersion('1.6'): - ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme, context=context) - else: - ht = HtpasswdFile(dest, default=crypt_scheme, context=context) + ht = HtpasswdFile(dest, new=False, default_scheme=hash_scheme, context=context) - found = None - if getattr(ht, 'check_password', None): - found = ht.check_password(username, password) - else: - found = ht.verify(username, password) + found = 
ht.check_password(username, password) if found: return ("%s already present" % username, False) else: if not check_mode: - if getattr(ht, 'set_password', None): - ht.set_password(username, password) - else: - ht.update(username, password) + ht.set_password(username, password) ht.save() return ("Add/update %s" % username, True) @@ -176,10 +161,7 @@ def absent(dest, username, check_mode): """ Ensures user is absent Returns (msg, changed) """ - if LooseVersion(passlib.__version__) >= LooseVersion('1.6'): - ht = HtpasswdFile(dest, new=False) - else: - ht = HtpasswdFile(dest) + ht = HtpasswdFile(dest, new=False) if username not in ht.users(): return ("%s not present" % username, False) @@ -207,9 +189,9 @@ def main(): arg_spec = dict( path=dict(type='path', required=True, aliases=["dest", "destfile"]), name=dict(type='str', required=True, aliases=["username"]), - password=dict(type='str', required=False, default=None, no_log=True), - crypt_scheme=dict(type='str', required=False, default="apr_md5_crypt"), - state=dict(type='str', required=False, default="present", choices=["present", "absent"]), + password=dict(type='str', no_log=True), + hash_scheme=dict(type='str', default="apr_md5_crypt", aliases=["crypt_scheme"]), + state=dict(type='str', default="present", choices=["present", "absent"]), create=dict(type='bool', default=True), ) @@ -220,25 +202,18 @@ def main(): path = module.params['path'] username = module.params['name'] password = module.params['password'] - crypt_scheme = module.params['crypt_scheme'] + hash_scheme = module.params['hash_scheme'] state = module.params['state'] create = module.params['create'] check_mode = module.check_mode - if not passlib_installed: - module.fail_json(msg=missing_required_lib("passlib"), exception=PASSLIB_IMP_ERR) + deps.validate(module) + # TODO double check if this hack below is still needed. # Check file for blank lines in effort to avoid "need more than 1 value to unpack" error. 
try: - f = open(path, "r") - except IOError: - # No preexisting file to remove blank lines from - f = None - else: - try: + with open(path, "r") as f: lines = f.readlines() - finally: - f.close() # If the file gets edited, it returns true, so only edit the file if it has blank lines strip = False @@ -252,24 +227,26 @@ def main(): if check_mode: temp = tempfile.NamedTemporaryFile() path = temp.name - f = open(path, "w") - try: - [f.write(line) for line in lines if line.strip()] - finally: - f.close() + with open(path, "w") as f: + f.writelines(line for line in lines if line.strip()) + + except IOError: + # No preexisting file to remove blank lines from + pass try: if state == 'present': - (msg, changed) = present(path, username, password, crypt_scheme, create, check_mode) + (msg, changed) = present(path, username, password, hash_scheme, create, check_mode) elif state == 'absent': if not os.path.exists(path): - module.exit_json(msg="%s not present" % username, - warnings="%s does not exist" % path, changed=False) + module.warn("%s does not exist" % path) + module.exit_json(msg="%s not present" % username, changed=False) (msg, changed) = absent(path, username, check_mode) else: module.fail_json(msg="Invalid state: %s" % state) + return # needed to make pylint happy - check_file_attrs(module, changed, msg) + (msg, changed) = check_file_attrs(module, changed, msg) module.exit_json(msg=msg, changed=changed) except Exception as e: module.fail_json(msg=to_native(e)) diff --git a/plugins/modules/hwc_ecs_instance.py b/plugins/modules/hwc_ecs_instance.py index 10d913f9b5..610cd8b872 100644 --- a/plugins/modules/hwc_ecs_instance.py +++ b/plugins/modules/hwc_ecs_instance.py @@ -1,235 +1,216 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (C) 2019 Huawei # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, 
print_function -__metaclass__ = type +from __future__ import annotations ############################################################################### # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_ecs_instance description: - - instance management. + - Instance management. short_description: Creates a resource of Ecs/Instance in Huawei Cloud version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: - description: - - The timeouts for each operations. - type: dict - default: {} - suboptions: - create: - description: - - The timeouts for create operation. - type: str - default: '30m' - update: - description: - - The timeouts for update operation. - type: str - default: '30m' - delete: - description: - - The timeouts for delete operation. - type: str - default: '30m' - availability_zone: - description: - - Specifies the name of the AZ where the ECS is located. - type: str - required: true - flavor_name: - description: - - Specifies the name of the system flavor. - type: str - required: true - image_id: - description: - - Specifies the ID of the system image. - type: str - required: true - name: - description: - - Specifies the ECS name. Value requirements consists of 1 to 64 - characters, including letters, digits, underscores C(_), hyphens - (-), periods (.). - type: str - required: true - nics: - description: - - Specifies the NIC information of the ECS. Constraints the - network of the NIC must belong to the VPC specified by vpc_id. A - maximum of 12 NICs can be attached to an ECS. 
- type: list - elements: dict - required: true - suboptions: - ip_address: - description: - - Specifies the IP address of the NIC. The value is an IPv4 - address. Its value must be an unused IP - address in the network segment of the subnet. - type: str - required: true - subnet_id: - description: - - Specifies the ID of subnet. - type: str - required: true - root_volume: - description: - - Specifies the configuration of the ECS's system disks. - type: dict - required: true - suboptions: - volume_type: - description: - - Specifies the ECS system disk type. - - SATA is common I/O disk type. - - SAS is high I/O disk type. - - SSD is ultra-high I/O disk type. - - co-p1 is high I/O (performance-optimized I) disk type. - - uh-l1 is ultra-high I/O (latency-optimized) disk type. - - NOTE is For HANA, HL1, and HL2 ECSs, use co-p1 and uh-l1 - disks. For other ECSs, do not use co-p1 or uh-l1 disks. - type: str - required: true - size: - description: - - Specifies the system disk size, in GB. The value range is - 1 to 1024. The system disk size must be - greater than or equal to the minimum system disk size - supported by the image (min_disk attribute of the image). - If this parameter is not specified or is set to 0, the - default system disk size is the minimum value of the - system disk in the image (min_disk attribute of the - image). - type: int - required: false - snapshot_id: - description: - - Specifies the snapshot ID or ID of the original data disk - contained in the full-ECS image. - type: str - required: false - vpc_id: - description: - - Specifies the ID of the VPC to which the ECS belongs. - type: str - required: true - admin_pass: - description: - - Specifies the initial login password of the administrator account - for logging in to an ECS using password authentication. The Linux - administrator is root, and the Windows administrator is - Administrator. Password complexity requirements, consists of 8 to - 26 characters. 
The password must contain at least three of the - following character types 'uppercase letters, lowercase letters, - digits, and special characters (!@$%^-_=+[{}]:,./?)'. The password - cannot contain the username or the username in reverse. The - Windows ECS password cannot contain the username, the username in - reverse, or more than two consecutive characters in the username. - type: str - required: false - data_volumes: - description: - - Specifies the data disks of ECS instance. - type: list - elements: dict - required: false - suboptions: - volume_id: - description: - - Specifies the disk ID. - type: str - required: true - device: - description: - - Specifies the disk device name. - type: str - required: false + state: description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + default: {} + suboptions: + create: description: - - Specifies the description of an ECS, which is a null string by - default. Can contain a maximum of 85 characters. Cannot contain - special characters, such as < and >. + - The timeouts for create operation. + type: str + default: '30m' + update: + description: + - The timeouts for update operation. + type: str + default: '30m' + delete: + description: + - The timeouts for delete operation. + type: str + default: '30m' + availability_zone: + description: + - Specifies the name of the AZ where the ECS is located. + type: str + required: true + flavor_name: + description: + - Specifies the name of the system flavor. + type: str + required: true + image_id: + description: + - Specifies the ID of the system image. + type: str + required: true + name: + description: + - Specifies the ECS name. Value requirements consists of 1 to 64 characters, including letters, digits, underscores + (V(_)), hyphens (V(-)), periods (V(.)). 
+ type: str + required: true + nics: + description: + - Specifies the NIC information of the ECS. Constraints the network of the NIC must belong to the VPC specified by vpc_id. + A maximum of 12 NICs can be attached to an ECS. + type: list + elements: dict + required: true + suboptions: + ip_address: + description: + - Specifies the IP address of the NIC. The value is an IPv4 address. Its value must be an unused IP address in the + network segment of the subnet. + type: str + required: true + subnet_id: + description: + - Specifies the ID of subnet. + type: str + required: true + root_volume: + description: + - Specifies the configuration of the ECS's system disks. + type: dict + required: true + suboptions: + volume_type: + description: + - Specifies the ECS system disk type. + - SATA is common I/O disk type. + - SAS is high I/O disk type. + - SSD is ultra-high I/O disk type. + - Co-p1 is high I/O (performance-optimized I) disk type. + - Uh-l1 is ultra-high I/O (latency-optimized) disk type. + - NOTE is For HANA, HL1, and HL2 ECSs, use co-p1 and uh-l1 disks. For other ECSs, do not use co-p1 or uh-l1 disks. + type: str + required: true + size: + description: + - Specifies the system disk size, in GB. The value range is 1 to 1024. The system disk size must be greater than + or equal to the minimum system disk size supported by the image (min_disk attribute of the image). If this parameter + is not specified or is set to 0, the default system disk size is the minimum value of the system disk in the image + (min_disk attribute of the image). + type: int + required: false + snapshot_id: + description: + - Specifies the snapshot ID or ID of the original data disk contained in the full-ECS image. type: str required: false - eip_id: + vpc_id: + description: + - Specifies the ID of the VPC to which the ECS belongs. 
+ type: str + required: true + admin_pass: + description: + - Specifies the initial login password of the administrator account for logging in to an ECS using password authentication. + The Linux administrator is root, and the Windows administrator is Administrator. Password complexity requirements, + consists of 8 to 26 characters. The password must contain at least three of the following character types 'uppercase + letters, lowercase letters, digits, and special characters (V(!@$%^-_=+[{}]:,./?))'. The password cannot contain the + username or the username in reverse. The Windows ECS password cannot contain the username, the username in reverse, + or more than two consecutive characters in the username. + type: str + required: false + data_volumes: + description: + - Specifies the data disks of ECS instance. + type: list + elements: dict + required: false + suboptions: + volume_id: description: - - Specifies the ID of the elastic IP address assigned to the ECS. - Only elastic IP addresses in the DOWN state can be - assigned. - type: str - required: false - enable_auto_recovery: - description: - - Specifies whether automatic recovery is enabled on the ECS. - type: bool - required: false - enterprise_project_id: - description: - - Specifies the ID of the enterprise project to which the ECS - belongs. - type: str - required: false - security_groups: - description: - - Specifies the security groups of the ECS. If this - parameter is left blank, the default security group is bound to - the ECS by default. - type: list - elements: str - required: false - server_metadata: - description: - - Specifies the metadata of ECS to be created. - type: dict - required: false - server_tags: - description: - - Specifies the tags of an ECS. When you create ECSs, one ECS - supports up to 10 tags. - type: dict - required: false - ssh_key_name: - description: - - Specifies the name of the SSH key used for logging in to the ECS. 
- type: str - required: false - user_data: - description: - - Specifies the user data to be injected during the ECS creation - process. Text, text files, and gzip files can be injected. - The content to be injected must be encoded with - base64. The maximum size of the content to be injected (before - encoding) is 32 KB. For Linux ECSs, this parameter does not take - effect when adminPass is used. + - Specifies the disk ID. + type: str + required: true + device: + description: + - Specifies the disk device name. type: str required: false + description: + description: + - Specifies the description of an ECS, which is a null string by default. Can contain a maximum of 85 characters. Cannot + contain special characters, such as V(<) and V(>). + type: str + required: false + eip_id: + description: + - Specifies the ID of the elastic IP address assigned to the ECS. Only elastic IP addresses in the DOWN state can be + assigned. + type: str + required: false + enable_auto_recovery: + description: + - Specifies whether automatic recovery is enabled on the ECS. + type: bool + required: false + enterprise_project_id: + description: + - Specifies the ID of the enterprise project to which the ECS belongs. + type: str + required: false + security_groups: + description: + - Specifies the security groups of the ECS. If this parameter is left blank, the default security group is bound to + the ECS by default. + type: list + elements: str + required: false + server_metadata: + description: + - Specifies the metadata of ECS to be created. + type: dict + required: false + server_tags: + description: + - Specifies the tags of an ECS. When you create ECSs, one ECS supports up to 10 tags. + type: dict + required: false + ssh_key_name: + description: + - Specifies the name of the SSH key used for logging in to the ECS. + type: str + required: false + user_data: + description: + - Specifies the user data to be injected during the ECS creation process. 
Text, text files, and gzip files can be injected. + The content to be injected must be encoded with base64. The maximum size of the content to be injected (before encoding) + is 32 KB. For Linux ECSs, this parameter does not take effect when adminPass is used. + type: str + required: false extends_documentation_fragment: -- community.general.hwc + - community.general.hwc + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # create an ecs instance - name: Create a vpc hwc_network_vpc: @@ -279,238 +260,216 @@ EXAMPLES = ''' vpc_id: "{{ vpc.id }}" root_volume: volume_type: "SAS" -''' +""" -RETURN = ''' - availability_zone: - description: - - Specifies the name of the AZ where the ECS is located. - type: str - returned: success - flavor_name: - description: - - Specifies the name of the system flavor. - type: str - returned: success - image_id: - description: - - Specifies the ID of the system image. - type: str - returned: success - name: - description: - - Specifies the ECS name. Value requirements "Consists of 1 to 64 - characters, including letters, digits, underscores C(_), hyphens - (-), periods (.)". - type: str - returned: success - nics: - description: - - Specifies the NIC information of the ECS. The - network of the NIC must belong to the VPC specified by vpc_id. A - maximum of 12 NICs can be attached to an ECS. - type: list - returned: success - contains: - ip_address: - description: - - Specifies the IP address of the NIC. The value is an IPv4 - address. Its value must be an unused IP - address in the network segment of the subnet. - type: str - returned: success - subnet_id: - description: - - Specifies the ID of subnet. - type: str - returned: success - port_id: - description: - - Specifies the port ID corresponding to the IP address. - type: str - returned: success - root_volume: - description: - - Specifies the configuration of the ECS's system disks. 
- type: dict - returned: success - contains: - volume_type: - description: - - Specifies the ECS system disk type. - - SATA is common I/O disk type. - - SAS is high I/O disk type. - - SSD is ultra-high I/O disk type. - - co-p1 is high I/O (performance-optimized I) disk type. - - uh-l1 is ultra-high I/O (latency-optimized) disk type. - - NOTE is For HANA, HL1, and HL2 ECSs, use co-p1 and uh-l1 - disks. For other ECSs, do not use co-p1 or uh-l1 disks. - type: str - returned: success - size: - description: - - Specifies the system disk size, in GB. The value range is - 1 to 1024. The system disk size must be - greater than or equal to the minimum system disk size - supported by the image (min_disk attribute of the image). - If this parameter is not specified or is set to 0, the - default system disk size is the minimum value of the - system disk in the image (min_disk attribute of the - image). - type: int - returned: success - snapshot_id: - description: - - Specifies the snapshot ID or ID of the original data disk - contained in the full-ECS image. - type: str - returned: success - device: - description: - - Specifies the disk device name. - type: str - returned: success - volume_id: - description: - - Specifies the disk ID. - type: str - returned: success - vpc_id: - description: - - Specifies the ID of the VPC to which the ECS belongs. - type: str - returned: success - admin_pass: - description: - - Specifies the initial login password of the administrator account - for logging in to an ECS using password authentication. The Linux - administrator is root, and the Windows administrator is - Administrator. Password complexity requirements consists of 8 to - 26 characters. The password must contain at least three of the - following character types "uppercase letters, lowercase letters, - digits, and special characters (!@$%^-_=+[{}]:,./?)". The password - cannot contain the username or the username in reverse. 
The - Windows ECS password cannot contain the username, the username in - reverse, or more than two consecutive characters in the username. - type: str - returned: success - data_volumes: - description: - - Specifies the data disks of ECS instance. - type: list - returned: success - contains: - volume_id: - description: - - Specifies the disk ID. - type: str - returned: success - device: - description: - - Specifies the disk device name. - type: str - returned: success - description: - description: - - Specifies the description of an ECS, which is a null string by - default. Can contain a maximum of 85 characters. Cannot contain - special characters, such as < and >. - type: str - returned: success - eip_id: - description: - - Specifies the ID of the elastic IP address assigned to the ECS. - Only elastic IP addresses in the DOWN state can be assigned. - type: str - returned: success - enable_auto_recovery: - description: - - Specifies whether automatic recovery is enabled on the ECS. - type: bool - returned: success - enterprise_project_id: - description: - - Specifies the ID of the enterprise project to which the ECS - belongs. - type: str - returned: success - security_groups: - description: - - Specifies the security groups of the ECS. If this parameter is left - blank, the default security group is bound to the ECS by default. - type: list - returned: success - server_metadata: - description: - - Specifies the metadata of ECS to be created. - type: dict - returned: success - server_tags: - description: - - Specifies the tags of an ECS. When you create ECSs, one ECS - supports up to 10 tags. - type: dict - returned: success - ssh_key_name: - description: - - Specifies the name of the SSH key used for logging in to the ECS. - type: str - returned: success - user_data: - description: - - Specifies the user data to be injected during the ECS creation - process. Text, text files, and gzip files can be injected. 
- The content to be injected must be encoded with base64. The maximum - size of the content to be injected (before encoding) is 32 KB. For - Linux ECSs, this parameter does not take effect when adminPass is - used. - type: str - returned: success - config_drive: - description: - - Specifies the configuration driver. - type: str - returned: success - created: - description: - - Specifies the time when an ECS was created. - type: str - returned: success - disk_config_type: - description: - - Specifies the disk configuration type. MANUAL is The image - space is not expanded. AUTO is the image space of the system disk - will be expanded to be as same as the flavor. - type: str - returned: success - host_name: - description: - - Specifies the host name of the ECS. - type: str - returned: success - image_name: - description: - - Specifies the image name of the ECS. - type: str - returned: success - power_state: - description: - - Specifies the power status of the ECS. - type: int - returned: success - server_alias: - description: - - Specifies the ECS alias. - type: str - returned: success - status: - description: - - Specifies the ECS status. Options are ACTIVE, REBOOT, HARD_REBOOT, - REBUILD, MIGRATING, BUILD, SHUTOFF, RESIZE, VERIFY_RESIZE, ERROR, - and DELETED. - type: str - returned: success -''' +RETURN = r""" +availability_zone: + description: + - Specifies the name of the AZ where the ECS is located. + type: str + returned: success +flavor_name: + description: + - Specifies the name of the system flavor. + type: str + returned: success +image_id: + description: + - Specifies the ID of the system image. + type: str + returned: success +name: + description: + - Specifies the ECS name. Value requirements "Consists of 1 to 64 characters, including letters, digits, underscores (V(_)), + hyphens (V(-)), periods (V(.)).". + type: str + returned: success +nics: + description: + - Specifies the NIC information of the ECS. 
The network of the NIC must belong to the VPC specified by vpc_id. A maximum + of 12 NICs can be attached to an ECS. + type: list + returned: success + contains: + ip_address: + description: + - Specifies the IP address of the NIC. The value is an IPv4 address. Its value must be an unused IP address in the + network segment of the subnet. + type: str + returned: success + subnet_id: + description: + - Specifies the ID of subnet. + type: str + returned: success + port_id: + description: + - Specifies the port ID corresponding to the IP address. + type: str + returned: success +root_volume: + description: + - Specifies the configuration of the ECS's system disks. + type: dict + returned: success + contains: + volume_type: + description: + - Specifies the ECS system disk type. + - SATA is common I/O disk type. + - SAS is high I/O disk type. + - SSD is ultra-high I/O disk type. + - Co-p1 is high I/O (performance-optimized I) disk type. + - Uh-l1 is ultra-high I/O (latency-optimized) disk type. + - NOTE is For HANA, HL1, and HL2 ECSs, use co-p1 and uh-l1 disks. For other ECSs, do not use co-p1 or uh-l1 disks. + type: str + returned: success + size: + description: + - Specifies the system disk size, in GB. The value range is 1 to 1024. The system disk size must be greater than or + equal to the minimum system disk size supported by the image (min_disk attribute of the image). If this parameter + is not specified or is set to 0, the default system disk size is the minimum value of the system disk in the image + (min_disk attribute of the image). + type: int + returned: success + snapshot_id: + description: + - Specifies the snapshot ID or ID of the original data disk contained in the full-ECS image. + type: str + returned: success + device: + description: + - Specifies the disk device name. + type: str + returned: success + volume_id: + description: + - Specifies the disk ID. 
+ type: str + returned: success +vpc_id: + description: + - Specifies the ID of the VPC to which the ECS belongs. + type: str + returned: success +admin_pass: + description: + - Specifies the initial login password of the administrator account for logging in to an ECS using password authentication. + The Linux administrator is root, and the Windows administrator is Administrator. Password complexity requirements consists + of 8 to 26 characters. The password must contain at least three of the following character types "uppercase letters, + lowercase letters, digits, and special characters (!@$%^-_=+[{}]:,./?)". The password cannot contain the username or + the username in reverse. The Windows ECS password cannot contain the username, the username in reverse, or more than + two consecutive characters in the username. + type: str + returned: success +data_volumes: + description: + - Specifies the data disks of ECS instance. + type: list + returned: success + contains: + volume_id: + description: + - Specifies the disk ID. + type: str + returned: success + device: + description: + - Specifies the disk device name. + type: str + returned: success +description: + description: + - Specifies the description of an ECS, which is a null string by default. Can contain a maximum of 85 characters. Cannot + contain special characters, such as < and >. + type: str + returned: success +eip_id: + description: + - Specifies the ID of the elastic IP address assigned to the ECS. Only elastic IP addresses in the DOWN state can be assigned. + type: str + returned: success +enable_auto_recovery: + description: + - Specifies whether automatic recovery is enabled on the ECS. + type: bool + returned: success +enterprise_project_id: + description: + - Specifies the ID of the enterprise project to which the ECS belongs. + type: str + returned: success +security_groups: + description: + - Specifies the security groups of the ECS. 
If this parameter is left blank, the default security group is bound to the + ECS by default. + type: list + returned: success +server_metadata: + description: + - Specifies the metadata of ECS to be created. + type: dict + returned: success +server_tags: + description: + - Specifies the tags of an ECS. When you create ECSs, one ECS supports up to 10 tags. + type: dict + returned: success +ssh_key_name: + description: + - Specifies the name of the SSH key used for logging in to the ECS. + type: str + returned: success +user_data: + description: + - Specifies the user data to be injected during the ECS creation process. Text, text files, and gzip files can be injected. + The content to be injected must be encoded with base64. The maximum size of the content to be injected (before encoding) + is 32 KB. For Linux ECSs, this parameter does not take effect when adminPass is used. + type: str + returned: success +config_drive: + description: + - Specifies the configuration driver. + type: str + returned: success +created: + description: + - Specifies the time when an ECS was created. + type: str + returned: success +disk_config_type: + description: + - Specifies the disk configuration type. MANUAL is The image space is not expanded. AUTO is the image space of the system + disk is expanded to be as same as the flavor. + type: str + returned: success +host_name: + description: + - Specifies the host name of the ECS. + type: str + returned: success +image_name: + description: + - Specifies the image name of the ECS. + type: str + returned: success +power_state: + description: + - Specifies the power status of the ECS. + type: int + returned: success +server_alias: + description: + - Specifies the ECS alias. + type: str + returned: success +status: + description: + - Specifies the ECS status. Options are ACTIVE, REBOOT, HARD_REBOOT, REBUILD, MIGRATING, BUILD, SHUTOFF, RESIZE, VERIFY_RESIZE, + ERROR, and DELETED. 
+ type: str + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcModule, are_different_dicts, build_path, @@ -1157,8 +1116,7 @@ def send_delete_volume_request(module, params, client, info): path_parameters = { "volume_id": ["volume_id"], } - data = dict((key, navigate_value(info, path)) - for key, path in path_parameters.items()) + data = {key: navigate_value(info, path) for key, path in path_parameters.items()} url = build_path(module, "cloudservers/{id}/detachvolume/{volume_id}", data) diff --git a/plugins/modules/hwc_evs_disk.py b/plugins/modules/hwc_evs_disk.py index 7b5a99fb7f..0963736ec2 100644 --- a/plugins/modules/hwc_evs_disk.py +++ b/plugins/modules/hwc_evs_disk.py @@ -1,160 +1,144 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (C) 2019 Huawei # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations ############################################################################### # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_evs_disk description: - - block storage management. + - Block storage management. short_description: Creates a resource of Evs/Disk in Huawei Cloud version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - Whether the given object should exist in Huaweicloud Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: - description: - - The timeouts for each operations. 
- type: dict - default: {} - suboptions: - create: - description: - - The timeouts for create operation. - type: str - default: '30m' - update: - description: - - The timeouts for update operation. - type: str - default: '30m' - delete: - description: - - The timeouts for delete operation. - type: str - default: '30m' - availability_zone: - description: - - Specifies the AZ where you want to create the disk. - type: str - required: true - name: - description: - - Specifies the disk name. The value can contain a maximum of 255 - bytes. - type: str - required: true - volume_type: - description: - - Specifies the disk type. Currently, the value can be SSD, SAS, or - SATA. - - SSD specifies the ultra-high I/O disk type. - - SAS specifies the high I/O disk type. - - SATA specifies the common I/O disk type. - - If the specified disk type is not available in the AZ, the - disk will fail to create. If the EVS disk is created from a - snapshot, the volume_type field must be the same as that of the - snapshot's source disk. - type: str - required: true - backup_id: - description: - - Specifies the ID of the backup that can be used to create a disk. - This parameter is mandatory when you use a backup to create the - disk. - type: str - required: false + state: description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + default: {} + suboptions: + create: description: - - Specifies the disk description. The value can contain a maximum - of 255 bytes. + - The timeouts for create operation. type: str - required: false - enable_full_clone: + default: '30m' + update: description: - - If the disk is created from a snapshot and linked cloning needs - to be used, set this parameter to True. 
- type: bool - required: false - enable_scsi: - description: - - If this parameter is set to True, the disk device type will be - SCSI, which allows ECS OSs to directly access underlying storage - media. SCSI reservation command is supported. If this parameter - is set to False, the disk device type will be VBD, which supports - only simple SCSI read/write commands. - - If parameter enable_share is set to True and this parameter - is not specified, shared SCSI disks are created. SCSI EVS disks - cannot be created from backups, which means that this parameter - cannot be True if backup_id has been specified. - type: bool - required: false - enable_share: - description: - - Specifies whether the disk is shareable. The default value is - False. - type: bool - required: false - encryption_id: - description: - - Specifies the encryption ID. The length of it fixes at 36 bytes. + - The timeouts for update operation. type: str - required: false - enterprise_project_id: + default: '30m' + delete: description: - - Specifies the enterprise project ID. This ID is associated with - the disk during the disk creation. If it is not specified, the - disk is bound to the default enterprise project. + - The timeouts for delete operation. type: str - required: false - image_id: - description: - - Specifies the image ID. If this parameter is specified, the disk - is created from an image. BMS system disks cannot be - created from BMS images. - type: str - required: false - size: - description: - - Specifies the disk size, in GB. Its values are as follows, System - disk 1 GB to 1024 GB, Data disk 10 GB to 32768 GB. This - parameter is mandatory when you create an empty disk or use an - image or a snapshot to create a disk. If you use an image or a - snapshot to create a disk, the disk size must be greater than or - equal to the image or snapshot size. This parameter is optional - when you use a backup to create a disk. 
If this parameter is not - specified, the disk size is equal to the backup size. - type: int - required: false - snapshot_id: - description: - - Specifies the snapshot ID. If this parameter is specified, the - disk is created from a snapshot. - type: str - required: false + default: '30m' + availability_zone: + description: + - Specifies the AZ where you want to create the disk. + type: str + required: true + name: + description: + - Specifies the disk name. The value can contain a maximum of 255 bytes. + type: str + required: true + volume_type: + description: + - Specifies the disk type. Currently, the value can be SSD, SAS, or SATA. + - SSD specifies the ultra-high I/O disk type. + - SAS specifies the high I/O disk type. + - SATA specifies the common I/O disk type. + - If the specified disk type is not available in the AZ, the disk creation fails. If the EVS disk is created from a + snapshot, the volume_type field must be the same as that of the snapshot's source disk. + type: str + required: true + backup_id: + description: + - Specifies the ID of the backup that can be used to create a disk. This parameter is mandatory when you use a backup + to create the disk. + type: str + required: false + description: + description: + - Specifies the disk description. The value can contain a maximum of 255 bytes. + type: str + required: false + enable_full_clone: + description: + - If the disk is created from a snapshot and linked cloning needs to be used, set this parameter to True. + type: bool + required: false + enable_scsi: + description: + - If this parameter is set to V(true), the disk device type is SCSI, which allows ECS OSs to directly access underlying + storage media. SCSI reservation command is supported. If this parameter is set to V(false), the disk device type is + VBD, which supports only simple SCSI read/write commands. + - If parameter enable_share is set to True and this parameter is not specified, shared SCSI disks are created. 
SCSI + EVS disks cannot be created from backups, which means that this parameter cannot be True if backup_id has been specified. + type: bool + required: false + enable_share: + description: + - Specifies whether the disk is shareable. The default value is False. + type: bool + required: false + encryption_id: + description: + - Specifies the encryption ID. The length of it fixes at 36 bytes. + type: str + required: false + enterprise_project_id: + description: + - Specifies the enterprise project ID. This ID is associated with the disk during the disk creation. If it is not specified, + the disk is bound to the default enterprise project. + type: str + required: false + image_id: + description: + - Specifies the image ID. If this parameter is specified, the disk is created from an image. BMS system disks cannot + be created from BMS images. + type: str + required: false + size: + description: + - Specifies the disk size, in GB. Its values are as follows, System disk 1 GB to 1024 GB, Data disk 10 GB to 32768 GB. + This parameter is mandatory when you create an empty disk or use an image or a snapshot to create a disk. If you use + an image or a snapshot to create a disk, the disk size must be greater than or equal to the image or snapshot size. + This parameter is optional when you use a backup to create a disk. If this parameter is not specified, the disk size + is equal to the backup size. + type: int + required: false + snapshot_id: + description: + - Specifies the snapshot ID. If this parameter is specified, the disk is created from a snapshot. 
+ type: str + required: false extends_documentation_fragment: -- community.general.hwc + - community.general.hwc + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # test create disk - name: Create a disk community.general.hwc_evs_disk: @@ -162,176 +146,153 @@ EXAMPLES = ''' name: "ansible_evs_disk_test" volume_type: "SATA" size: 10 -''' +""" -RETURN = ''' - availability_zone: - description: - - Specifies the AZ where you want to create the disk. - type: str - returned: success - name: - description: - - Specifies the disk name. The value can contain a maximum of 255 - bytes. - type: str - returned: success - volume_type: - description: - - Specifies the disk type. Currently, the value can be SSD, SAS, or - SATA. - - SSD specifies the ultra-high I/O disk type. - - SAS specifies the high I/O disk type. - - SATA specifies the common I/O disk type. - - If the specified disk type is not available in the AZ, the - disk will fail to create. If the EVS disk is created from a - snapshot, the volume_type field must be the same as that of the - snapshot's source disk. - type: str - returned: success - backup_id: - description: - - Specifies the ID of the backup that can be used to create a disk. - This parameter is mandatory when you use a backup to create the - disk. - type: str - returned: success - description: - description: - - Specifies the disk description. The value can contain a maximum - of 255 bytes. - type: str - returned: success - enable_full_clone: - description: - - If the disk is created from a snapshot and linked cloning needs - to be used, set this parameter to True. - type: bool - returned: success - enable_scsi: - description: - - If this parameter is set to True, the disk device type will be - SCSI, which allows ECS OSs to directly access underlying storage - media. SCSI reservation command is supported. 
If this parameter - is set to False, the disk device type will be VBD, which supports - only simple SCSI read/write commands. - - If parameter enable_share is set to True and this parameter - is not specified, shared SCSI disks are created. SCSI EVS disks - cannot be created from backups, which means that this parameter - cannot be True if backup_id has been specified. - type: bool - returned: success - enable_share: - description: - - Specifies whether the disk is shareable. The default value is - False. - type: bool - returned: success - encryption_id: - description: - - Specifies the encryption ID. The length of it fixes at 36 bytes. - type: str - returned: success - enterprise_project_id: - description: - - Specifies the enterprise project ID. This ID is associated with - the disk during the disk creation. If it is not specified, the - disk is bound to the default enterprise project. - type: str - returned: success - image_id: - description: - - Specifies the image ID. If this parameter is specified, the disk - is created from an image. BMS system disks cannot be - created from BMS images. - type: str - returned: success - size: - description: - - Specifies the disk size, in GB. Its values are as follows, System - disk 1 GB to 1024 GB, Data disk 10 GB to 32768 GB. This - parameter is mandatory when you create an empty disk or use an - image or a snapshot to create a disk. If you use an image or a - snapshot to create a disk, the disk size must be greater than or - equal to the image or snapshot size. This parameter is optional - when you use a backup to create a disk. If this parameter is not - specified, the disk size is equal to the backup size. - type: int - returned: success - snapshot_id: - description: - - Specifies the snapshot ID. If this parameter is specified, the - disk is created from a snapshot. - type: str - returned: success - attachments: - description: - - Specifies the disk attachment information. 
- type: complex - returned: success - contains: - attached_at: - description: - - Specifies the time when the disk was attached. Time - format is 'UTC YYYY-MM-DDTHH:MM:SS'. - type: str - returned: success - attachment_id: - description: - - Specifies the ID of the attachment information. - type: str - returned: success - device: - description: - - Specifies the device name. - type: str - returned: success - server_id: - description: - - Specifies the ID of the server to which the disk is - attached. - type: str - returned: success - backup_policy_id: - description: - - Specifies the backup policy ID. - type: str - returned: success - created_at: - description: - - Specifies the time when the disk was created. Time format is 'UTC - YYYY-MM-DDTHH:MM:SS'. - type: str - returned: success - is_bootable: - description: - - Specifies whether the disk is bootable. - type: bool - returned: success - is_readonly: - description: - - Specifies whether the disk is read-only or read/write. True - indicates that the disk is read-only. False indicates that the - disk is read/write. - type: bool - returned: success - source_volume_id: - description: - - Specifies the source disk ID. This parameter has a value if the - disk is created from a source disk. - type: str - returned: success - status: - description: - - Specifies the disk status. - type: str - returned: success - tags: - description: - - Specifies the disk tags. - type: dict - returned: success -''' +RETURN = r""" +availability_zone: + description: + - Specifies the AZ where you want to create the disk. + type: str + returned: success +name: + description: + - Specifies the disk name. The value can contain a maximum of 255 bytes. + type: str + returned: success +volume_type: + description: + - Specifies the disk type. Currently, the value can be SSD, SAS, or SATA. + - SSD specifies the ultra-high I/O disk type. + - SAS specifies the high I/O disk type. + - SATA specifies the common I/O disk type. 
+ - If the specified disk type is not available in the AZ, the disk creation fails. If the EVS disk is created from a snapshot, + the volume_type field must be the same as that of the snapshot's source disk. + type: str + returned: success +backup_id: + description: + - Specifies the ID of the backup that can be used to create a disk. This parameter is mandatory when you use a backup + to create the disk. + type: str + returned: success +description: + description: + - Specifies the disk description. The value can contain a maximum of 255 bytes. + type: str + returned: success +enable_full_clone: + description: + - If the disk is created from a snapshot and linked cloning needs to be used, set this parameter to True. + type: bool + returned: success +enable_scsi: + description: + - If this parameter is set to V(true), the disk device type is SCSI, which allows ECS OSs to directly access underlying + storage media. SCSI reservation command is supported. If this parameter is set to V(false), the disk device type is + VBD, which supports only simple SCSI read/write commands. + - If parameter enable_share is set to True and this parameter is not specified, shared SCSI disks are created. SCSI EVS + disks cannot be created from backups, which means that this parameter cannot be True if backup_id has been specified. + type: bool + returned: success +enable_share: + description: + - Specifies whether the disk is shareable. The default value is False. + type: bool + returned: success +encryption_id: + description: + - Specifies the encryption ID. The length of it fixes at 36 bytes. + type: str + returned: success +enterprise_project_id: + description: + - Specifies the enterprise project ID. This ID is associated with the disk during the disk creation. If it is not specified, + the disk is bound to the default enterprise project. + type: str + returned: success +image_id: + description: + - Specifies the image ID. 
If this parameter is specified, the disk is created from an image. BMS system disks cannot be + created from BMS images. + type: str + returned: success +size: + description: + - Specifies the disk size, in GB. Its values are as follows, System disk 1 GB to 1024 GB, Data disk 10 GB to 32768 GB. + This parameter is mandatory when you create an empty disk or use an image or a snapshot to create a disk. If you use + an image or a snapshot to create a disk, the disk size must be greater than or equal to the image or snapshot size. + This parameter is optional when you use a backup to create a disk. If this parameter is not specified, the disk size + is equal to the backup size. + type: int + returned: success +snapshot_id: + description: + - Specifies the snapshot ID. If this parameter is specified, the disk is created from a snapshot. + type: str + returned: success +attachments: + description: + - Specifies the disk attachment information. + type: complex + returned: success + contains: + attached_at: + description: + - Specifies the time when the disk was attached. Time format is 'UTC YYYY-MM-DDTHH:MM:SS'. + type: str + returned: success + attachment_id: + description: + - Specifies the ID of the attachment information. + type: str + returned: success + device: + description: + - Specifies the device name. + type: str + returned: success + server_id: + description: + - Specifies the ID of the server to which the disk is attached. + type: str + returned: success +backup_policy_id: + description: + - Specifies the backup policy ID. + type: str + returned: success +created_at: + description: + - Specifies the time when the disk was created. Time format is 'UTC YYYY-MM-DDTHH:MM:SS'. + type: str + returned: success +is_bootable: + description: + - Specifies whether the disk is bootable. + type: bool + returned: success +is_readonly: + description: + - Specifies whether the disk is read-only or read/write. True indicates that the disk is read-only. 
False indicates that + the disk is read/write. + type: bool + returned: success +source_volume_id: + description: + - Specifies the source disk ID. This parameter has a value if the disk is created from a source disk. + type: str + returned: success +status: + description: + - Specifies the disk status. + type: str + returned: success +tags: + description: + - Specifies the disk tags. + type: dict + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcModule, are_different_dicts, build_path, @@ -765,8 +726,7 @@ def async_wait(config, result, client, timeout): path_parameters = { "job_id": ["job_id"], } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} url = build_path(module, "jobs/{job_id}", data) diff --git a/plugins/modules/hwc_network_vpc.py b/plugins/modules/hwc_network_vpc.py index 78f5925e0c..b974831c87 100644 --- a/plugins/modules/hwc_network_vpc.py +++ b/plugins/modules/hwc_network_vpc.py @@ -1,128 +1,129 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (C) 2018 Huawei # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations ############################################################################### # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_network_vpc description: - - Represents an vpc resource. + - Represents an vpc resource. short_description: Creates a Huawei Cloud VPC author: Huawei Inc. 
(@huaweicloud) requirements: - - requests >= 2.18.4 - - keystoneauth1 >= 3.6.0 + - requests >= 2.18.4 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: + state: + description: + - Whether the given object should exist in VPC. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + default: {} + suboptions: + create: description: - - Whether the given object should exist in vpc. + - The timeout for create operation. type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: + default: '15m' + update: description: - - The timeouts for each operations. - type: dict - default: {} - suboptions: - create: - description: - - The timeout for create operation. - type: str - default: '15m' - update: - description: - - The timeout for update operation. - type: str - default: '15m' - delete: - description: - - The timeout for delete operation. - type: str - default: '15m' - name: - description: - - The name of vpc. + - The timeout for update operation. type: str - required: true - cidr: + default: '15m' + delete: description: - - The range of available subnets in the vpc. + - The timeout for delete operation. type: str - required: true + default: '15m' + name: + description: + - The name of vpc. + type: str + required: true + cidr: + description: + - The range of available subnets in the VPC. 
+ type: str + required: true extends_documentation_fragment: -- community.general.hwc + - community.general.hwc + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a vpc community.general.hwc_network_vpc: - identity_endpoint: "{{ identity_endpoint }}" - user: "{{ user }}" - password: "{{ password }}" - domain: "{{ domain }}" - project: "{{ project }}" - region: "{{ region }}" - name: "vpc_1" - cidr: "192.168.100.0/24" - state: present -''' + identity_endpoint: "{{ identity_endpoint }}" + user: "{{ user }}" + password: "{{ password }}" + domain: "{{ domain }}" + project: "{{ project }}" + region: "{{ region }}" + name: "vpc_1" + cidr: "192.168.100.0/24" + state: present +""" -RETURN = ''' - id: - description: - - the id of vpc. - type: str - returned: success - name: - description: - - the name of vpc. - type: str - returned: success - cidr: - description: - - the range of available subnets in the vpc. - type: str - returned: success - status: - description: - - the status of vpc. - type: str - returned: success - routes: - description: - - the route information. - type: complex - returned: success - contains: - destination: - description: - - the destination network segment of a route. - type: str - returned: success - next_hop: - description: - - the next hop of a route. If the route type is peering, - it will provide VPC peering connection ID. - type: str - returned: success - enable_shared_snat: - description: - - show whether the shared snat is enabled. - type: bool - returned: success -''' +RETURN = r""" +id: + description: + - The ID of VPC. + type: str + returned: success +name: + description: + - The name of VPC. + type: str + returned: success +cidr: + description: + - The range of available subnets in the VPC. + type: str + returned: success +status: + description: + - The status of VPC. + type: str + returned: success +routes: + description: + - The route information. 
+ type: complex + returned: success + contains: + destination: + description: + - The destination network segment of a route. + type: str + returned: success + next_hop: + description: + - The next hop of a route. If the route type is peering, it provides VPC peering connection ID. + type: str + returned: success +enable_shared_snat: + description: + - Show whether the shared SNAT is enabled. + type: bool + returned: success +""" ############################################################################### # Imports @@ -375,13 +376,13 @@ def response_to_hash(module, response): This is for doing comparisons with Ansible's current parameters. """ return { - u'id': response.get(u'id'), - u'name': response.get(u'name'), - u'cidr': response.get(u'cidr'), - u'status': response.get(u'status'), - u'routes': VpcRoutesArray( - response.get(u'routes', []), module).from_response(), - u'enable_shared_snat': response.get(u'enable_shared_snat') + 'id': response.get('id'), + 'name': response.get('name'), + 'cidr': response.get('cidr'), + 'status': response.get('status'), + 'routes': VpcRoutesArray( + response.get('routes', []), module).from_response(), + 'enable_shared_snat': response.get('enable_shared_snat') } @@ -479,14 +480,14 @@ class VpcRoutesArray(object): def _request_for_item(self, item): return { - u'destination': item.get('destination'), - u'nexthop': item.get('next_hop') + 'destination': item.get('destination'), + 'nexthop': item.get('next_hop') } def _response_from_item(self, item): return { - u'destination': item.get(u'destination'), - u'next_hop': item.get(u'nexthop') + 'destination': item.get('destination'), + 'next_hop': item.get('nexthop') } diff --git a/plugins/modules/hwc_smn_topic.py b/plugins/modules/hwc_smn_topic.py index 3752e1f18f..6fb9a3814d 100644 --- a/plugins/modules/hwc_smn_topic.py +++ b/plugins/modules/hwc_smn_topic.py @@ -1,106 +1,101 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (C) 2019 Huawei # GNU General Public License v3.0+ 
(see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations ############################################################################### # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_smn_topic description: - - Represents a SMN notification topic resource. -short_description: Creates a resource of SMNTopic in Huaweicloud Cloud + - Represents a SMN notification topic resource. +short_description: Creates a resource of SMNTopic in Huawei Cloud author: Huawei Inc. (@huaweicloud) requirements: - - requests >= 2.18.4 - - keystoneauth1 >= 3.6.0 + - requests >= 2.18.4 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - Whether the given object should exist in Huaweicloud Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - display_name: - description: - - Topic display name, which is presented as the name of the email - sender in an email message. The topic display name contains a - maximum of 192 bytes. - type: str - required: false - name: - description: - - Name of the topic to be created. The topic name is a string of 1 - to 256 characters. It must contain upper- or lower-case letters, - digits, hyphens (-), and underscores C(_), and must start with a - letter or digit. - type: str - required: true + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + display_name: + description: + - Topic display name, which is presented as the name of the email sender in an email message. The topic display name + contains a maximum of 192 bytes. 
+ type: str + required: false + name: + description: + - Name of the topic to be created. The topic name is a string of 1 to 256 characters. It must contain upper- or lower-case + letters, digits, hyphens (V(-)), and underscores (V(_)), and must start with a letter or digit. + type: str + required: true extends_documentation_fragment: -- community.general.hwc + - community.general.hwc + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a smn topic community.general.hwc_smn_topic: - identity_endpoint: "{{ identity_endpoint }}" - user_name: "{{ user_name }}" - password: "{{ password }}" - domain_name: "{{ domain_name }}" - project_name: "{{ project_name }}" - region: "{{ region }}" - name: "ansible_smn_topic_test" - state: present -''' + identity_endpoint: "{{ identity_endpoint }}" + user_name: "{{ user_name }}" + password: "{{ password }}" + domain_name: "{{ domain_name }}" + project_name: "{{ project_name }}" + region: "{{ region }}" + name: "ansible_smn_topic_test" + state: present +""" -RETURN = ''' +RETURN = r""" create_time: - description: - - Time when the topic was created. - returned: success - type: str + description: + - Time when the topic was created. + returned: success + type: str display_name: - description: - - Topic display name, which is presented as the name of the email - sender in an email message. The topic display name contains a - maximum of 192 bytes. - returned: success - type: str + description: + - Topic display name, which is presented as the name of the email sender in an email message. The topic display name contains + a maximum of 192 bytes. + returned: success + type: str name: - description: - - Name of the topic to be created. The topic name is a string of 1 - to 256 characters. It must contain upper- or lower-case letters, - digits, hyphens (-), and underscores C(_), and must start with a - letter or digit. - returned: success - type: str + description: + - Name of the topic to be created. 
The topic name is a string of 1 to 256 characters. It must contain upper- or lower-case + letters, digits, hyphens (V(-)), and underscores (V(_)), and must start with a letter or digit. + returned: success + type: str push_policy: - description: - - Message pushing policy. 0 indicates that the message sending - fails and the message is cached in the queue. 1 indicates that - the failed message is discarded. - returned: success - type: int + description: + - Message pushing policy. V(0) indicates that the message sending fails and the message is cached in the queue. V(1) indicates + that the failed message is discarded. + returned: success + type: int topic_urn: - description: - - Resource identifier of a topic, which is unique. - returned: success - type: str + description: + - Resource identifier of a topic, which is unique. + returned: success + type: str update_time: - description: - - Time when the topic was updated. - returned: success - type: str -''' + description: + - Time when the topic was updated. + returned: success + type: str +""" ############################################################################### # Imports @@ -317,13 +312,12 @@ def response_to_hash(module, response): This is for doing comparisons with Ansible's current parameters. 
""" return { - u'create_time': response.get(u'create_time'), - u'display_name': response.get(u'display_name'), - u'name': response.get(u'name'), - u'push_policy': _push_policy_convert_from_response( - response.get('push_policy')), - u'topic_urn': response.get(u'topic_urn'), - u'update_time': response.get(u'update_time') + 'create_time': response.get('create_time'), + 'display_name': response.get('display_name'), + 'name': response.get('name'), + 'push_policy': _push_policy_convert_from_response(response.get('push_policy')), + 'topic_urn': response.get('topic_urn'), + 'update_time': response.get('update_time') } diff --git a/plugins/modules/hwc_vpc_eip.py b/plugins/modules/hwc_vpc_eip.py index e14fb0e502..9a23b7b3f9 100644 --- a/plugins/modules/hwc_vpc_eip.py +++ b/plugins/modules/hwc_vpc_eip.py @@ -1,131 +1,119 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (C) 2019 Huawei # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations ############################################################################### # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_vpc_eip description: - - elastic ip management. -short_description: Creates a resource of Vpc/EIP in Huawei Cloud + - Elastic IP management. +short_description: Creates a resource of VPC/EIP in Huawei Cloud version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: + state: + description: + - Whether the given object should exist in Huawei Cloud. 
+ type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + default: {} + suboptions: + create: description: - - Whether the given object should exist in Huawei Cloud. + - The timeouts for create operation. type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: + default: '5m' + update: description: - - The timeouts for each operations. - type: dict - default: {} - suboptions: - create: - description: - - The timeouts for create operation. - type: str - default: '5m' - update: - description: - - The timeouts for update operation. - type: str - default: '5m' - type: + - The timeouts for update operation. + type: str + default: '5m' + type: + description: + - Specifies the EIP type. + type: str + required: true + dedicated_bandwidth: + description: + - Specifies the dedicated bandwidth object. + type: dict + required: false + suboptions: + charge_mode: description: - - Specifies the EIP type. + - Specifies whether the bandwidth is billed by traffic or by bandwidth size. The value can be bandwidth or traffic. + If this parameter is left blank or is null character string, default value bandwidth is used. For IPv6 addresses, + the default parameter value is bandwidth outside China and is traffic in China. type: str required: true - dedicated_bandwidth: + name: description: - - Specifies the dedicated bandwidth object. - type: dict - required: false - suboptions: - charge_mode: - description: - - Specifies whether the bandwidth is billed by traffic or - by bandwidth size. The value can be bandwidth or traffic. - If this parameter is left blank or is null character - string, default value bandwidth is used. For IPv6 - addresses, the default parameter value is bandwidth - outside China and is traffic in China. - type: str - required: true - name: - description: - - Specifies the bandwidth name. 
The value is a string of 1 - to 64 characters that can contain letters, digits, - underscores C(_), hyphens (-), and periods (.). - type: str - required: true - size: - description: - - Specifies the bandwidth size. The value ranges from 1 - Mbit/s to 2000 Mbit/s by default. (The specific range may - vary depending on the configuration in each region. You - can see the bandwidth range of each region on the - management console.) The minimum unit for bandwidth - adjustment varies depending on the bandwidth range. The - details are as follows. - - The minimum unit is 1 Mbit/s if the allowed bandwidth - size ranges from 0 to 300 Mbit/s (with 300 Mbit/s - included). - - The minimum unit is 50 Mbit/s if the allowed bandwidth - size ranges 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s - included). - - The minimum unit is 500 Mbit/s if the allowed bandwidth - size is greater than 1000 Mbit/s. - type: int - required: true - enterprise_project_id: - description: - - Specifies the enterprise project ID. + - Specifies the bandwidth name. The value is a string of 1 to 64 characters that can contain letters, digits, underscores + (V(_)), hyphens (V(-)), and periods (V(.)). type: str - required: false - ip_version: + required: true + size: description: - - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this - parameter is left blank, an IPv4 address will be assigned. + - Specifies the bandwidth size. The value ranges from 1 Mbit/s to 2000 Mbit/s by default. (The specific range may + vary depending on the configuration in each region. You can see the bandwidth range of each region on the management + console.) The minimum unit for bandwidth adjustment varies depending on the bandwidth range. The details are as + follows. + - The minimum unit is 1 Mbit/s if the allowed bandwidth size ranges from 0 to 300 Mbit/s (with 300 Mbit/s included). + - The minimum unit is 50 Mbit/s if the allowed bandwidth size ranges 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s + included). 
+ - The minimum unit is 500 Mbit/s if the allowed bandwidth size is greater than 1000 Mbit/s. type: int - required: false - ipv4_address: - description: - - Specifies the obtained IPv4 EIP. The system automatically assigns - an EIP if you do not specify it. - type: str - required: false - port_id: - description: - - Specifies the port ID. This parameter is returned only when a - private IP address is bound with the EIP. - type: str - required: false - shared_bandwidth_id: - description: - - Specifies the ID of shared bandwidth. - type: str - required: false + required: true + enterprise_project_id: + description: + - Specifies the enterprise project ID. + type: str + required: false + ip_version: + description: + - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this parameter is left blank, an IPv4 address is assigned. + type: int + required: false + ipv4_address: + description: + - Specifies the obtained IPv4 EIP. The system automatically assigns an EIP if you do not specify it. + type: str + required: false + port_id: + description: + - Specifies the port ID. This parameter is returned only when a private IP address is bound with the EIP. + type: str + required: false + shared_bandwidth_id: + description: + - Specifies the ID of shared bandwidth. + type: str + required: false extends_documentation_fragment: -- community.general.hwc + - community.general.hwc + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # create an eip and bind it to a port - name: Create vpc hwc_network_vpc: @@ -153,107 +141,91 @@ EXAMPLES = ''' name: "ansible_test_dedicated_bandwidth" size: 1 port_id: "{{ port.id }}" -''' +""" -RETURN = ''' - type: - description: - - Specifies the EIP type. - type: str - returned: success - dedicated_bandwidth: - description: - - Specifies the dedicated bandwidth object. 
- type: dict - returned: success - contains: - charge_mode: - description: - - Specifies whether the bandwidth is billed by traffic or - by bandwidth size. The value can be bandwidth or traffic. - If this parameter is left blank or is null character - string, default value bandwidth is used. For IPv6 - addresses, the default parameter value is bandwidth - outside China and is traffic in China. - type: str - returned: success - name: - description: - - Specifies the bandwidth name. The value is a string of 1 - to 64 characters that can contain letters, digits, - underscores C(_), hyphens (-), and periods (.). - type: str - returned: success - size: - description: - - Specifies the bandwidth size. The value ranges from 1 - Mbit/s to 2000 Mbit/s by default. (The specific range may - vary depending on the configuration in each region. You - can see the bandwidth range of each region on the - management console.) The minimum unit for bandwidth - adjustment varies depending on the bandwidth range. The - details are as follows:. - - The minimum unit is 1 Mbit/s if the allowed bandwidth - size ranges from 0 to 300 Mbit/s (with 300 Mbit/s - included). - - The minimum unit is 50 Mbit/s if the allowed bandwidth - size ranges 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s - included). - - The minimum unit is 500 Mbit/s if the allowed bandwidth - size is greater than 1000 Mbit/s. - type: int - returned: success - id: - description: - - Specifies the ID of dedicated bandwidth. - type: str - returned: success - enterprise_project_id: - description: - - Specifies the enterprise project ID. - type: str - returned: success - ip_version: - description: - - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this - parameter is left blank, an IPv4 address will be assigned. - type: int - returned: success - ipv4_address: - description: - - Specifies the obtained IPv4 EIP. The system automatically assigns - an EIP if you do not specify it. 
- type: str - returned: success - port_id: - description: - - Specifies the port ID. This parameter is returned only when a - private IP address is bound with the EIP. - type: str - returned: success - shared_bandwidth_id: - description: - - Specifies the ID of shared bandwidth. - type: str - returned: success - create_time: - description: - - Specifies the time (UTC time) when the EIP was assigned. - type: str - returned: success - ipv6_address: - description: - - Specifies the obtained IPv6 EIP. - type: str - returned: success - private_ip_address: - description: - - Specifies the private IP address bound with the EIP. This - parameter is returned only when a private IP address is bound - with the EIP. - type: str - returned: success -''' +RETURN = r""" +type: + description: + - Specifies the EIP type. + type: str + returned: success +dedicated_bandwidth: + description: + - Specifies the dedicated bandwidth object. + type: dict + returned: success + contains: + charge_mode: + description: + - Specifies whether the bandwidth is billed by traffic or by bandwidth size. The value can be bandwidth or traffic. + If this parameter is left blank or is null character string, default value bandwidth is used. For IPv6 addresses, + the default parameter value is bandwidth outside China and is traffic in China. + type: str + returned: success + name: + description: + - Specifies the bandwidth name. The value is a string of 1 to 64 characters that can contain letters, digits, underscores + (V(_)), hyphens (V(-)), and periods (V(.)). + type: str + returned: success + size: + description: + - Specifies the bandwidth size. The value ranges from 1 Mbit/s to 2000 Mbit/s by default. (The specific range may + vary depending on the configuration in each region. You can see the bandwidth range of each region on the management + console.) The minimum unit for bandwidth adjustment varies depending on the bandwidth range. The details are as + follows:. 
+ - The minimum unit is 1 Mbit/s if the allowed bandwidth size ranges from 0 to 300 Mbit/s (with 300 Mbit/s included). + - The minimum unit is 50 Mbit/s if the allowed bandwidth size ranges 300 Mbit/s to 1000 Mbit/s (with 1000 Mbit/s included). + - The minimum unit is 500 Mbit/s if the allowed bandwidth size is greater than 1000 Mbit/s. + type: int + returned: success + id: + description: + - Specifies the ID of dedicated bandwidth. + type: str + returned: success +enterprise_project_id: + description: + - Specifies the enterprise project ID. + type: str + returned: success +ip_version: + description: + - The value can be 4 (IPv4 address) or 6 (IPv6 address). If this parameter is left blank, an IPv4 address is assigned. + type: int + returned: success +ipv4_address: + description: + - Specifies the obtained IPv4 EIP. The system automatically assigns an EIP if you do not specify it. + type: str + returned: success +port_id: + description: + - Specifies the port ID. This parameter is returned only when a private IP address is bound with the EIP. + type: str + returned: success +shared_bandwidth_id: + description: + - Specifies the ID of shared bandwidth. + type: str + returned: success +create_time: + description: + - Specifies the time (UTC time) when the EIP was assigned. + type: str + returned: success +ipv6_address: + description: + - Specifies the obtained IPv6 EIP. + type: str + returned: success +private_ip_address: + description: + - Specifies the private IP address bound with the EIP. This parameter is returned only when a private IP address is bound + with the EIP. 
+ type: str + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcClientException404, HwcModule, @@ -541,8 +513,7 @@ def async_wait_create(config, result, client, timeout): path_parameters = { "publicip_id": ["publicip", "id"], } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} url = build_path(module, "publicips/{publicip_id}", data) diff --git a/plugins/modules/hwc_vpc_peering_connect.py b/plugins/modules/hwc_vpc_peering_connect.py index 01c52932ba..e5d410c327 100644 --- a/plugins/modules/hwc_vpc_peering_connect.py +++ b/plugins/modules/hwc_vpc_peering_connect.py @@ -1,85 +1,85 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (C) 2019 Huawei # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or SPDX-License-Identifier: GPL-3.0-or-later # https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations ############################################################################### # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_vpc_peering_connect description: - - vpc peering management. -short_description: Creates a resource of Vpc/PeeringConnect in Huawei Cloud + - VPC peering management. +short_description: Creates a resource of VPC/PeeringConnect in Huawei Cloud version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - Whether the given object should exist in Huawei Cloud. 
- type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: - description: - - The timeouts for each operations. - type: dict - default: {} - suboptions: - create: - description: - - The timeouts for create operation. - type: str - default: '15m' - local_vpc_id: - description: - - Specifies the ID of local VPC. - type: str - required: true - name: - description: - - Specifies the name of the VPC peering connection. The value can - contain 1 to 64 characters. - type: str - required: true - peering_vpc: - description: - - Specifies information about the peering VPC. - type: dict - required: true - suboptions: - vpc_id: - description: - - Specifies the ID of peering VPC. - type: str - required: true - project_id: - description: - - Specifies the ID of the project which the peering vpc - belongs to. - type: str - required: false + state: description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + default: {} + suboptions: + create: description: - - The description of vpc peering connection. + - The timeouts for create operation. + type: str + default: '15m' + local_vpc_id: + description: + - Specifies the ID of local VPC. + type: str + required: true + name: + description: + - Specifies the name of the VPC peering connection. The value can contain 1 to 64 characters. + type: str + required: true + peering_vpc: + description: + - Specifies information about the peering VPC. + type: dict + required: true + suboptions: + vpc_id: + description: + - Specifies the ID of peering VPC. + type: str + required: true + project_id: + description: + - Specifies the ID of the project which the peering vpc belongs to. type: str required: false + description: + description: + - The description of vpc peering connection. 
+ type: str + required: false extends_documentation_fragment: -- community.general.hwc + - community.general.hwc + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # create a peering connect - name: Create a local vpc hwc_network_vpc: @@ -97,43 +97,41 @@ EXAMPLES = ''' name: "ansible_network_peering_test" peering_vpc: vpc_id: "{{ vpc2.id }}" -''' +""" -RETURN = ''' - local_vpc_id: - description: - - Specifies the ID of local VPC. - type: str - returned: success - name: - description: - - Specifies the name of the VPC peering connection. The value can - contain 1 to 64 characters. - type: str - returned: success - peering_vpc: - description: - - Specifies information about the peering VPC. - type: dict - returned: success - contains: - vpc_id: - description: - - Specifies the ID of peering VPC. - type: str - returned: success - project_id: - description: - - Specifies the ID of the project which the peering vpc - belongs to. - type: str - returned: success - description: - description: - - The description of vpc peering connection. - type: str - returned: success -''' +RETURN = r""" +local_vpc_id: + description: + - Specifies the ID of local VPC. + type: str + returned: success +name: + description: + - Specifies the name of the VPC peering connection. The value can contain 1 to 64 characters. + type: str + returned: success +peering_vpc: + description: + - Specifies information about the peering VPC. + type: dict + returned: success + contains: + vpc_id: + description: + - Specifies the ID of peering VPC. + type: str + returned: success + project_id: + description: + - Specifies the ID of the project which the peering vpc belongs to. + type: str + returned: success +description: + description: + - The description of vpc peering connection. 
+ type: str + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcClientException404, HwcModule, @@ -401,8 +399,7 @@ def async_wait_create(config, result, client, timeout): path_parameters = { "peering_id": ["peering", "id"], } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} url = build_path(module, "v2.0/vpc/peerings/{peering_id}", data) diff --git a/plugins/modules/hwc_vpc_port.py b/plugins/modules/hwc_vpc_port.py index aac9636f88..54bea0f249 100644 --- a/plugins/modules/hwc_vpc_port.py +++ b/plugins/modules/hwc_vpc_port.py @@ -1,115 +1,114 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (C) 2019 Huawei # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations ############################################################################### # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_vpc_port description: - - vpc port management. -short_description: Creates a resource of Vpc/Port in Huawei Cloud + - VPC port management. +short_description: Creates a resource of VPC/Port in Huawei Cloud version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. 
+ type: dict + default: {} + suboptions: + create: description: - - Whether the given object should exist in Huawei Cloud. + - The timeouts for create operation. type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: + default: '15m' + subnet_id: + description: + - Specifies the ID of the subnet to which the port belongs. + type: str + required: true + admin_state_up: + description: + - Specifies the administrative state of the port. + type: bool + required: false + allowed_address_pairs: + description: + - Specifies a set of zero or more allowed address pairs. + required: false + type: list + elements: dict + suboptions: + ip_address: description: - - The timeouts for each operations. - type: dict - default: {} - suboptions: - create: - description: - - The timeouts for create operation. - type: str - default: '15m' - subnet_id: - description: - - Specifies the ID of the subnet to which the port belongs. - type: str - required: true - admin_state_up: - description: - - Specifies the administrative state of the port. - type: bool - required: false - allowed_address_pairs: - description: - - Specifies a set of zero or more allowed address pairs. - required: false - type: list - elements: dict - suboptions: - ip_address: - description: - - Specifies the IP address. It cannot set it to 0.0.0.0. - Configure an independent security group for the port if a - large CIDR block (subnet mask less than 24) is configured - for parameter allowed_address_pairs. - type: str - required: false - mac_address: - description: - - Specifies the MAC address. - type: str - required: false - extra_dhcp_opts: - description: - - Specifies the extended option of DHCP. - type: list - elements: dict - required: false - suboptions: - name: - description: - - Specifies the option name. - type: str - required: false - value: - description: - - Specifies the option value. - type: str - required: false - ip_address: - description: - - Specifies the port IP address. 
+ - Specifies the IP address. It cannot set it to 0.0.0.0. Configure an independent security group for the port if + a large CIDR block (subnet mask less than 24) is configured for parameter allowed_address_pairs. type: str required: false - name: + mac_address: description: - - Specifies the port name. The value can contain no more than 255 - characters. + - Specifies the MAC address. type: str required: false - security_groups: + extra_dhcp_opts: + description: + - Specifies the extended option of DHCP. + type: list + elements: dict + required: false + suboptions: + name: description: - - Specifies the ID of the security group. - type: list - elements: str + - Specifies the option name. + type: str required: false + value: + description: + - Specifies the option value. + type: str + required: false + ip_address: + description: + - Specifies the port IP address. + type: str + required: false + name: + description: + - Specifies the port name. The value can contain no more than 255 characters. + type: str + required: false + security_groups: + description: + - Specifies the ID of the security group. + type: list + elements: str + required: false extends_documentation_fragment: -- community.general.hwc + - community.general.hwc + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # create a port - name: Create vpc hwc_network_vpc: @@ -128,76 +127,73 @@ EXAMPLES = ''' community.general.hwc_vpc_port: subnet_id: "{{ subnet.id }}" ip_address: "192.168.100.33" -''' +""" -RETURN = ''' - subnet_id: - description: - - Specifies the ID of the subnet to which the port belongs. - type: str - returned: success - admin_state_up: - description: - - Specifies the administrative state of the port. - type: bool - returned: success - allowed_address_pairs: - description: - - Specifies a set of zero or more allowed address pairs. - type: list - returned: success - contains: - ip_address: - description: - - Specifies the IP address. It cannot set it to 0.0.0.0. 
- Configure an independent security group for the port if a - large CIDR block (subnet mask less than 24) is configured - for parameter allowed_address_pairs. - type: str - returned: success - mac_address: - description: - - Specifies the MAC address. - type: str - returned: success - extra_dhcp_opts: - description: - - Specifies the extended option of DHCP. - type: list - returned: success - contains: - name: - description: - - Specifies the option name. - type: str - returned: success - value: - description: - - Specifies the option value. - type: str - returned: success +RETURN = r""" +subnet_id: + description: + - Specifies the ID of the subnet to which the port belongs. + type: str + returned: success +admin_state_up: + description: + - Specifies the administrative state of the port. + type: bool + returned: success +allowed_address_pairs: + description: + - Specifies a set of zero or more allowed address pairs. + type: list + returned: success + contains: ip_address: - description: - - Specifies the port IP address. - type: str - returned: success - name: - description: - - Specifies the port name. The value can contain no more than 255 - characters. - type: str - returned: success - security_groups: - description: - - Specifies the ID of the security group. - type: list - returned: success + description: + - Specifies the IP address. It cannot set it to 0.0.0.0. Configure an independent security group for the port if a + large CIDR block (subnet mask less than 24) is configured for parameter allowed_address_pairs. + type: str + returned: success mac_address: - description: - - Specifies the port MAC address. - type: str - returned: success -''' + description: + - Specifies the MAC address. + type: str + returned: success +extra_dhcp_opts: + description: + - Specifies the extended option of DHCP. + type: list + returned: success + contains: + name: + description: + - Specifies the option name. 
+ type: str + returned: success + value: + description: + - Specifies the option value. + type: str + returned: success +ip_address: + description: + - Specifies the port IP address. + type: str + returned: success +name: + description: + - Specifies the port name. The value can contain no more than 255 characters. + type: str + returned: success +security_groups: + description: + - Specifies the ID of the security group. + type: list + returned: success +mac_address: + description: + - Specifies the port MAC address. + type: str + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcClientException404, HwcModule, @@ -554,8 +550,7 @@ def async_wait_create(config, result, client, timeout): path_parameters = { "port_id": ["port", "id"], } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} url = build_path(module, "ports/{port_id}", data) diff --git a/plugins/modules/hwc_vpc_private_ip.py b/plugins/modules/hwc_vpc_private_ip.py index e05c14f74d..664b4c84e4 100644 --- a/plugins/modules/hwc_vpc_private_ip.py +++ b/plugins/modules/hwc_vpc_private_ip.py @@ -1,59 +1,60 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (C) 2019 Huawei # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations ############################################################################### # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_vpc_private_ip description: - - vpc private ip management. 
-short_description: Creates a resource of Vpc/PrivateIP in Huawei Cloud + - VPC private IP management. +short_description: Creates a resource of VPC/PrivateIP in Huawei Cloud notes: - - If I(id) option is provided, it takes precedence over I(subnet_id), I(ip_address) for private ip selection. - - I(subnet_id), I(ip_address) are used for private ip selection. If more than one private ip with this options exists, execution is aborted. - - No parameter support updating. If one of option is changed, the module will create a new resource. + - If O(id) option is provided, it takes precedence over O(subnet_id), O(ip_address) for private IP selection. + - O(subnet_id), O(ip_address) are used for private IP selection. If more than one private IP with this options exists, execution + is aborted. + - No parameter support updating. If one of option is changed, the module creates a new resource. version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - subnet_id: - description: - - Specifies the ID of the subnet from which IP addresses are - assigned. Cannot be changed after creating the private ip. - type: str - required: true - ip_address: - description: - - Specifies the target IP address. The value can be an available IP - address in the subnet. If it is not specified, the system - automatically assigns an IP address. Cannot be changed after - creating the private ip. - type: str - required: false + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + subnet_id: + description: + - Specifies the ID of the subnet from which IP addresses are assigned. 
Cannot be changed after creating the private + IP. + type: str + required: true + ip_address: + description: + - Specifies the target IP address. The value can be an available IP address in the subnet. If it is not specified, the + system automatically assigns an IP address. Cannot be changed after creating the private IP. + type: str + required: false extends_documentation_fragment: -- community.general.hwc + - community.general.hwc + - community.general.attributes +""" -''' - -EXAMPLES = ''' -# create a private ip +EXAMPLES = r""" +# create a private IP - name: Create vpc hwc_network_vpc: cidr: "192.168.100.0/24" @@ -67,27 +68,25 @@ EXAMPLES = ''' vpc_id: "{{ vpc.id }}" cidr: "192.168.100.0/26" register: subnet -- name: Create a private ip +- name: Create a private IP community.general.hwc_vpc_private_ip: subnet_id: "{{ subnet.id }}" ip_address: "192.168.100.33" -''' +""" -RETURN = ''' - subnet_id: - description: - - Specifies the ID of the subnet from which IP addresses are - assigned. - type: str - returned: success - ip_address: - description: - - Specifies the target IP address. The value can be an available IP - address in the subnet. If it is not specified, the system - automatically assigns an IP address. - type: str - returned: success -''' +RETURN = r""" +subnet_id: + description: + - Specifies the ID of the subnet from which IP addresses are assigned. + type: str + returned: success +ip_address: + description: + - Specifies the target IP address. The value can be an available IP address in the subnet. If it is not specified, the + system automatically assigns an IP address. 
+ type: str + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcModule, are_different_dicts, build_path, diff --git a/plugins/modules/hwc_vpc_route.py b/plugins/modules/hwc_vpc_route.py index e08a9ebf38..dfb1aea61b 100644 --- a/plugins/modules/hwc_vpc_route.py +++ b/plugins/modules/hwc_vpc_route.py @@ -1,65 +1,68 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (C) 2019 Huawei # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations ############################################################################### # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_vpc_route description: - - vpc route management. -short_description: Creates a resource of Vpc/Route in Huawei Cloud + - VPC route management. +short_description: Creates a resource of VPC/Route in Huawei Cloud notes: - - If I(id) option is provided, it takes precedence over I(destination), I(vpc_id), I(type) and I(next_hop) for route selection. - - I(destination), I(vpc_id), I(type) and I(next_hop) are used for route selection. If more than one route with this options exists, execution is aborted. - - No parameter support updating. If one of option is changed, the module will create a new resource. + - If O(id) option is provided, it takes precedence over O(destination), O(vpc_id), O(type), and O(next_hop) for route selection. + - O(destination), O(vpc_id), O(type) and O(next_hop) are used for route selection. If more than one route with this options + exists, execution is aborted. + - No parameter support updating. If one of option is changed, the module creates a new resource. 
version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - destination: - description: - - Specifies the destination IP address or CIDR block. - type: str - required: true - next_hop: - description: - - Specifies the next hop. The value is VPC peering connection ID. - type: str - required: true - vpc_id: - description: - - Specifies the VPC ID to which route is added. - type: str - required: true - type: - description: - - Specifies the type of route. - type: str - required: false - default: 'peering' + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + destination: + description: + - Specifies the destination IP address or CIDR block. + type: str + required: true + next_hop: + description: + - Specifies the next hop. The value is VPC peering connection ID. + type: str + required: true + vpc_id: + description: + - Specifies the VPC ID to which route is added. + type: str + required: true + type: + description: + - Specifies the type of route. + type: str + required: false + default: 'peering' extends_documentation_fragment: -- community.general.hwc + - community.general.hwc + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # create a peering connect - name: Create a local vpc hwc_network_vpc: @@ -85,35 +88,35 @@ EXAMPLES = ''' vpc_id: "{{ vpc1.id }}" destination: "192.168.0.0/16" next_hop: "{{ connect.id }}" -''' +""" -RETURN = ''' - id: - description: - - UUID of the route. - type: str - returned: success - destination: - description: - - Specifies the destination IP address or CIDR block. 
- type: str - returned: success - next_hop: - description: - - Specifies the next hop. The value is VPC peering connection ID. - type: str - returned: success - vpc_id: - description: - - Specifies the VPC ID to which route is added. - type: str - returned: success - type: - description: - - Specifies the type of route. - type: str - returned: success -''' +RETURN = r""" +id: + description: + - UUID of the route. + type: str + returned: success +destination: + description: + - Specifies the destination IP address or CIDR block. + type: str + returned: success +next_hop: + description: + - Specifies the next hop. The value is VPC peering connection ID. + type: str + returned: success +vpc_id: + description: + - Specifies the VPC ID to which route is added. + type: str + returned: success +type: + description: + - Specifies the type of route. + type: str + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcModule, are_different_dicts, build_path, diff --git a/plugins/modules/hwc_vpc_security_group.py b/plugins/modules/hwc_vpc_security_group.py index 2338623890..d73318666c 100644 --- a/plugins/modules/hwc_vpc_security_group.py +++ b/plugins/modules/hwc_vpc_security_group.py @@ -1,167 +1,150 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (C) 2019 Huawei # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations ############################################################################### # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_vpc_security_group description: - - vpc security group management. 
-short_description: Creates a resource of Vpc/SecurityGroup in Huawei Cloud + - VPC security group management. +short_description: Creates a resource of VPC/SecurityGroup in Huawei Cloud notes: - - If I(id) option is provided, it takes precedence over I(name), - I(enterprise_project_id) and I(vpc_id) for security group selection. - - I(name), I(enterprise_project_id) and I(vpc_id) are used for security - group selection. If more than one security group with this options exists, - execution is aborted. - - No parameter support updating. If one of option is changed, the module - will create a new resource. + - If O(id) option is provided, it takes precedence over O(name), O(enterprise_project_id), and O(vpc_id) for security group + selection. + - O(name), O(enterprise_project_id) and O(vpc_id) are used for security group selection. If more than one security group + with this options exists, execution is aborted. + - No parameter support updating. If one of option is changed, the module creates a new resource. version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - name: - description: - - Specifies the security group name. The value is a string of 1 to - 64 characters that can contain letters, digits, underscores C(_), - hyphens (-), and periods (.). - type: str - required: true - enterprise_project_id: - description: - - Specifies the enterprise project ID. When creating a security - group, associate the enterprise project ID with the security - group.s - type: str - required: false - vpc_id: - description: - - Specifies the resource ID of the VPC to which the security group - belongs. 
- type: str - required: false + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + name: + description: + - Specifies the security group name. The value is a string of 1 to 64 characters that can contain letters, digits, underscores + (V(_)), hyphens (V(-)), and periods (V(.)). + type: str + required: true + enterprise_project_id: + description: + - Specifies the enterprise project ID. When creating a security group, associate the enterprise project ID with the + security group.s. + type: str + required: false + vpc_id: + description: + - Specifies the resource ID of the VPC to which the security group belongs. + type: str + required: false extends_documentation_fragment: -- community.general.hwc + - community.general.hwc + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # create a security group - name: Create a security group community.general.hwc_vpc_security_group: name: "ansible_network_security_group_test" -''' +""" -RETURN = ''' - name: - description: - - Specifies the security group name. The value is a string of 1 to - 64 characters that can contain letters, digits, underscores C(_), - hyphens (-), and periods (.). - type: str - returned: success - enterprise_project_id: - description: - - Specifies the enterprise project ID. When creating a security - group, associate the enterprise project ID with the security - group. - type: str - returned: success - vpc_id: - description: - - Specifies the resource ID of the VPC to which the security group - belongs. - type: str - returned: success - rules: - description: - - Specifies the security group rule, which ensures that resources - in the security group can communicate with one another. - type: complex - returned: success - contains: - description: - description: - - Provides supplementary information about the security - group rule. 
- type: str - returned: success - direction: - description: - - Specifies the direction of access control. The value can - be egress or ingress. - type: str - returned: success - ethertype: - description: - - Specifies the IP protocol version. The value can be IPv4 - or IPv6. - type: str - returned: success - id: - description: - - Specifies the security group rule ID. - type: str - returned: success - port_range_max: - description: - - Specifies the end port number. The value ranges from 1 to - 65535. If the protocol is not icmp, the value cannot be - smaller than the port_range_min value. An empty value - indicates all ports. - type: int - returned: success - port_range_min: - description: - - Specifies the start port number. The value ranges from 1 - to 65535. The value cannot be greater than the - port_range_max value. An empty value indicates all ports. - type: int - returned: success - protocol: - description: - - Specifies the protocol type. The value can be icmp, tcp, - udp, or others. If the parameter is left blank, the - security group supports all protocols. - type: str - returned: success - remote_address_group_id: - description: - - Specifies the ID of remote IP address group. - type: str - returned: success - remote_group_id: - description: - - Specifies the ID of the peer security group. - type: str - returned: success - remote_ip_prefix: - description: - - Specifies the remote IP address. If the access control - direction is set to egress, the parameter specifies the - source IP address. If the access control direction is set - to ingress, the parameter specifies the destination IP - address. - type: str - returned: success -''' +RETURN = r""" +name: + description: + - Specifies the security group name. The value is a string of 1 to 64 characters that can contain letters, digits, underscores + (V(_)), hyphens (V(-)), and periods (V(.)). + type: str + returned: success +enterprise_project_id: + description: + - Specifies the enterprise project ID. 
When creating a security group, associate the enterprise project ID with the security + group. + type: str + returned: success +vpc_id: + description: + - Specifies the resource ID of the VPC to which the security group belongs. + type: str + returned: success +rules: + description: + - Specifies the security group rule, which ensures that resources in the security group can communicate with one another. + type: complex + returned: success + contains: + description: + description: + - Provides supplementary information about the security group rule. + type: str + returned: success + direction: + description: + - Specifies the direction of access control. The value can be egress or ingress. + type: str + returned: success + ethertype: + description: + - Specifies the IP protocol version. The value can be IPv4 or IPv6. + type: str + returned: success + id: + description: + - Specifies the security group rule ID. + type: str + returned: success + port_range_max: + description: + - Specifies the end port number. The value ranges from 1 to 65535. If the protocol is not icmp, the value cannot be + smaller than the port_range_min value. An empty value indicates all ports. + type: int + returned: success + port_range_min: + description: + - Specifies the start port number. The value ranges from 1 to 65535. The value cannot be greater than the port_range_max + value. An empty value indicates all ports. + type: int + returned: success + protocol: + description: + - Specifies the protocol type. The value can be icmp, tcp, udp, or others. If the parameter is left blank, the security + group supports all protocols. + type: str + returned: success + remote_address_group_id: + description: + - Specifies the ID of remote IP address group. + type: str + returned: success + remote_group_id: + description: + - Specifies the ID of the peer security group. + type: str + returned: success + remote_ip_prefix: + description: + - Specifies the remote IP address. 
If the access control direction is set to egress, the parameter specifies the source + IP address. If the access control direction is set to ingress, the parameter specifies the destination IP address. + type: str + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcModule, are_different_dicts, build_path, diff --git a/plugins/modules/hwc_vpc_security_group_rule.py b/plugins/modules/hwc_vpc_security_group_rule.py index ca6e2e9de8..153950fb2d 100644 --- a/plugins/modules/hwc_vpc_security_group_rule.py +++ b/plugins/modules/hwc_vpc_security_group_rule.py @@ -1,110 +1,99 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (C) 2019 Huawei # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations ############################################################################### # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_vpc_security_group_rule description: - - vpc security group management. -short_description: Creates a resource of Vpc/SecurityGroupRule in Huawei Cloud + - VPC security group management. +short_description: Creates a resource of VPC/SecurityGroupRule in Huawei Cloud notes: - - If I(id) option is provided, it takes precedence over - I(enterprise_project_id) for security group rule selection. - - I(security_group_id) is used for security group rule selection. If more - than one security group rule with this options exists, execution is - aborted. - - No parameter support updating. If one of option is changed, the module - will create a new resource. 
+ - If O(id) option is provided, it takes precedence over O(security_group_id) for security group rule selection. + - O(security_group_id) is used for security group rule selection. If more than one security group rule with these options + exists, execution is aborted. + - No parameter supports updating. If one of the options is changed, the module creates a new resource. version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: - description: - - Whether the given object should exist in Huawei Cloud. - type: str - choices: ['present', 'absent'] - default: 'present' - direction: - description: - - Specifies the direction of access control. The value can be - egress or ingress. - type: str - required: true - security_group_id: - description: - - Specifies the security group rule ID, which uniquely identifies - the security group rule. - type: str - required: true + state: description: - description: - - Provides supplementary information about the security group rule. - The value is a string of no more than 255 characters that can - contain letters and digits. - type: str - required: false - ethertype: - description: - - Specifies the IP protocol version. The value can be IPv4 or IPv6. - If you do not set this parameter, IPv4 is used by default. - type: str - required: false - port_range_max: - description: - - Specifies the end port number. The value ranges from 1 to 65535. - If the protocol is not icmp, the value cannot be smaller than the - port_range_min value. An empty value indicates all ports. - type: int - required: false - port_range_min: - description: - - Specifies the start port number. The value ranges from 1 to - 65535. The value cannot be greater than the port_range_max value. - An empty value indicates all ports. - type: int - required: false - protocol: - description: - - Specifies the protocol type. 
The value can be icmp, tcp, or udp. - If the parameter is left blank, the security group supports all - protocols. - type: str - required: false - remote_group_id: - description: - - Specifies the ID of the peer security group. The value is - exclusive with parameter remote_ip_prefix. - type: str - required: false - remote_ip_prefix: - description: - - Specifies the remote IP address. If the access control direction - is set to egress, the parameter specifies the source IP address. - If the access control direction is set to ingress, the parameter - specifies the destination IP address. The value can be in the - CIDR format or IP addresses. The parameter is exclusive with - parameter remote_group_id. - type: str - required: false + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + direction: + description: + - Specifies the direction of access control. The value can be egress or ingress. + type: str + required: true + security_group_id: + description: + - Specifies the security group rule ID, which uniquely identifies the security group rule. + type: str + required: true + description: + description: + - Provides supplementary information about the security group rule. The value is a string of no more than 255 characters + that can contain letters and digits. + type: str + required: false + ethertype: + description: + - Specifies the IP protocol version. The value can be IPv4 or IPv6. If you do not set this parameter, IPv4 is used by + default. + type: str + required: false + port_range_max: + description: + - Specifies the end port number. The value ranges from 1 to 65535. If the protocol is not icmp, the value cannot be + smaller than the port_range_min value. An empty value indicates all ports. + type: int + required: false + port_range_min: + description: + - Specifies the start port number. The value ranges from 1 to 65535. The value cannot be greater than the port_range_max + value. 
An empty value indicates all ports. + type: int + required: false + protocol: + description: + - Specifies the protocol type. The value can be icmp, tcp, or udp. If the parameter is left blank, the security group + supports all protocols. + type: str + required: false + remote_group_id: + description: + - Specifies the ID of the peer security group. The value is exclusive with parameter remote_ip_prefix. + type: str + required: false + remote_ip_prefix: + description: + - Specifies the remote IP address. If the access control direction is set to egress, the parameter specifies the source + IP address. If the access control direction is set to ingress, the parameter specifies the destination IP address. + The value can be in the CIDR format or IP addresses. The parameter is exclusive with parameter remote_group_id. + type: str + required: false extends_documentation_fragment: -- community.general.hwc + - community.general.hwc + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # create a security group rule - name: Create a security group hwc_vpc_security_group: @@ -119,72 +108,62 @@ EXAMPLES = ''' security_group_id: "{{ sg.id }}" port_range_min: 22 remote_ip_prefix: "0.0.0.0/0" -''' +""" -RETURN = ''' - direction: - description: - - Specifies the direction of access control. The value can be - egress or ingress. - type: str - returned: success - security_group_id: - description: - - Specifies the security group rule ID, which uniquely identifies - the security group rule. - type: str - returned: success - description: - description: - - Provides supplementary information about the security group rule. - The value is a string of no more than 255 characters that can - contain letters and digits. - type: str - returned: success - ethertype: - description: - - Specifies the IP protocol version. The value can be IPv4 or IPv6. - If you do not set this parameter, IPv4 is used by default. 
- type: str - returned: success - port_range_max: - description: - - Specifies the end port number. The value ranges from 1 to 65535. - If the protocol is not icmp, the value cannot be smaller than the - port_range_min value. An empty value indicates all ports. - type: int - returned: success - port_range_min: - description: - - Specifies the start port number. The value ranges from 1 to - 65535. The value cannot be greater than the port_range_max value. - An empty value indicates all ports. - type: int - returned: success - protocol: - description: - - Specifies the protocol type. The value can be icmp, tcp, or udp. - If the parameter is left blank, the security group supports all - protocols. - type: str - returned: success - remote_group_id: - description: - - Specifies the ID of the peer security group. The value is - exclusive with parameter remote_ip_prefix. - type: str - returned: success - remote_ip_prefix: - description: - - Specifies the remote IP address. If the access control direction - is set to egress, the parameter specifies the source IP address. - If the access control direction is set to ingress, the parameter - specifies the destination IP address. The value can be in the - CIDR format or IP addresses. The parameter is exclusive with - parameter remote_group_id. - type: str - returned: success -''' +RETURN = r""" +direction: + description: + - Specifies the direction of access control. The value can be egress or ingress. + type: str + returned: success +security_group_id: + description: + - Specifies the security group rule ID, which uniquely identifies the security group rule. + type: str + returned: success +description: + description: + - Provides supplementary information about the security group rule. The value is a string of no more than 255 characters + that can contain letters and digits. + type: str + returned: success +ethertype: + description: + - Specifies the IP protocol version. The value can be IPv4 or IPv6. 
If you do not set this parameter, IPv4 is used by + default. + type: str + returned: success +port_range_max: + description: + - Specifies the end port number. The value ranges from 1 to 65535. If the protocol is not icmp, the value cannot be smaller + than the port_range_min value. An empty value indicates all ports. + type: int + returned: success +port_range_min: + description: + - Specifies the start port number. The value ranges from 1 to 65535. The value cannot be greater than the port_range_max + value. An empty value indicates all ports. + type: int + returned: success +protocol: + description: + - Specifies the protocol type. The value can be icmp, tcp, or udp. If the parameter is left blank, the security group + supports all protocols. + type: str + returned: success +remote_group_id: + description: + - Specifies the ID of the peer security group. The value is exclusive with parameter remote_ip_prefix. + type: str + returned: success +remote_ip_prefix: + description: + - Specifies the remote IP address. If the access control direction is set to egress, the parameter specifies the source + IP address. If the access control direction is set to ingress, the parameter specifies the destination IP address. The + value can be in the CIDR format or IP addresses. The parameter is exclusive with parameter remote_group_id. 
+ type: str + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcModule, are_different_dicts, build_path, diff --git a/plugins/modules/hwc_vpc_subnet.py b/plugins/modules/hwc_vpc_subnet.py index 4b192a5682..316ed39c1f 100644 --- a/plugins/modules/hwc_vpc_subnet.py +++ b/plugins/modules/hwc_vpc_subnet.py @@ -1,104 +1,99 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (C) 2019 Huawei # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations ############################################################################### # Documentation ############################################################################### -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: hwc_vpc_subnet description: - - subnet management. -short_description: Creates a resource of Vpc/Subnet in Huawei Cloud + - Subnet management. +short_description: Creates a resource of VPC/Subnet in Huawei Cloud version_added: '0.2.0' author: Huawei Inc. (@huaweicloud) requirements: - - keystoneauth1 >= 3.6.0 + - keystoneauth1 >= 3.6.0 +attributes: + check_mode: + support: full + diff_mode: + support: none options: - state: + state: + description: + - Whether the given object should exist in Huawei Cloud. + type: str + choices: ['present', 'absent'] + default: 'present' + timeouts: + description: + - The timeouts for each operations. + type: dict + default: {} + suboptions: + create: description: - - Whether the given object should exist in Huawei Cloud. + - The timeouts for create operation. type: str - choices: ['present', 'absent'] - default: 'present' - timeouts: + default: '15m' + update: description: - - The timeouts for each operations. 
- type: dict - default: {} - suboptions: - create: - description: - - The timeouts for create operation. - type: str - default: '15m' - update: - description: - - The timeouts for update operation. - type: str - default: '15m' - cidr: - description: - - Specifies the subnet CIDR block. The value must be within the VPC - CIDR block and be in CIDR format. The subnet mask cannot be - greater than 28. Cannot be changed after creating the subnet. + - The timeouts for update operation. type: str - required: true - gateway_ip: - description: - - Specifies the gateway of the subnet. The value must be an IP - address in the subnet. Cannot be changed after creating the subnet. - type: str - required: true - name: - description: - - Specifies the subnet name. The value is a string of 1 to 64 - characters that can contain letters, digits, underscores C(_), - hyphens (-), and periods (.). - type: str - required: true - vpc_id: - description: - - Specifies the ID of the VPC to which the subnet belongs. Cannot - be changed after creating the subnet. - type: str - required: true - availability_zone: - description: - - Specifies the AZ to which the subnet belongs. Cannot be changed - after creating the subnet. - type: str - required: false - dhcp_enable: - description: - - Specifies whether DHCP is enabled for the subnet. The value can - be true (enabled) or false(disabled), and default value is true. - If this parameter is set to false, newly created ECSs cannot - obtain IP addresses, and usernames and passwords cannot be - injected using Cloud-init. - type: bool - required: false - dns_address: - description: - - Specifies the DNS server addresses for subnet. The address - in the head will be used first. - type: list - elements: str - required: false + default: '15m' + cidr: + description: + - Specifies the subnet CIDR block. The value must be within the VPC CIDR block and be in CIDR format. The subnet mask + cannot be greater than 28. Cannot be changed after creating the subnet. 
+ type: str + required: true + gateway_ip: + description: + - Specifies the gateway of the subnet. The value must be an IP address in the subnet. Cannot be changed after creating + the subnet. + type: str + required: true + name: + description: + - Specifies the subnet name. The value is a string of 1 to 64 characters that can contain letters, digits, underscores + (V(_)), hyphens (V(-)), and periods (V(.)). + type: str + required: true + vpc_id: + description: + - Specifies the ID of the VPC to which the subnet belongs. Cannot be changed after creating the subnet. + type: str + required: true + availability_zone: + description: + - Specifies the AZ to which the subnet belongs. Cannot be changed after creating the subnet. + type: str + required: false + dhcp_enable: + description: + - Specifies whether DHCP is enabled for the subnet. The value can be true (enabled) or false (disabled), and the default + value is true. If this parameter is set to false, newly created ECSs cannot obtain IP addresses, and usernames and + passwords cannot be injected using Cloud-init. + type: bool + required: false + dns_address: + description: + - Specifies the DNS server addresses for subnet. The address in the head is used first. + type: list + elements: str + required: false extends_documentation_fragment: -- community.general.hwc + - community.general.hwc + - community.general.attributes +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # create subnet - name: Create vpc hwc_network_vpc: @@ -112,55 +107,49 @@ gateway_ip: "192.168.100.32" name: "ansible_network_subnet_test" dhcp_enable: true -''' +""" -RETURN = ''' - cidr: - description: - - Specifies the subnet CIDR block. The value must be within the VPC - CIDR block and be in CIDR format. The subnet mask cannot be - greater than 28. - type: str - returned: success - gateway_ip: - description: - - Specifies the gateway of the subnet. The value must be an IP - address in the subnet. 
- type: str - returned: success - name: - description: - - Specifies the subnet name. The value is a string of 1 to 64 - characters that can contain letters, digits, underscores C(_), - hyphens (-), and periods (.). - type: str - returned: success - vpc_id: - description: - - Specifies the ID of the VPC to which the subnet belongs. - type: str - returned: success - availability_zone: - description: - - Specifies the AZ to which the subnet belongs. - type: str - returned: success - dhcp_enable: - description: - - Specifies whether DHCP is enabled for the subnet. The value can - be true (enabled) or false(disabled), and default value is true. - If this parameter is set to false, newly created ECSs cannot - obtain IP addresses, and usernames and passwords cannot be - injected using Cloud-init. - type: bool - returned: success - dns_address: - description: - - Specifies the DNS server addresses for subnet. The address - in the head will be used first. - type: list - returned: success -''' +RETURN = r""" +cidr: + description: + - Specifies the subnet CIDR block. The value must be within the VPC CIDR block and be in CIDR format. The subnet mask + cannot be greater than 28. + type: str + returned: success +gateway_ip: + description: + - Specifies the gateway of the subnet. The value must be an IP address in the subnet. + type: str + returned: success +name: + description: + - Specifies the subnet name. The value is a string of 1 to 64 characters that can contain letters, digits, underscores + (V(_)), hyphens (V(-)), and periods (V(.)). + type: str + returned: success +vpc_id: + description: + - Specifies the ID of the VPC to which the subnet belongs. + type: str + returned: success +availability_zone: + description: + - Specifies the AZ to which the subnet belongs. + type: str + returned: success +dhcp_enable: + description: + - Specifies whether DHCP is enabled for the subnet. The value can be true (enabled) or false (disabled), and the default value + is true. 
If this parameter is set to false, newly created ECSs cannot obtain IP addresses, and usernames and passwords + cannot be injected using Cloud-init. + type: bool + returned: success +dns_address: + description: + - Specifies the DNS server addresses for subnet. The address in the head is used first. + type: list + returned: success +""" from ansible_collections.community.general.plugins.module_utils.hwc_utils import ( Config, HwcClientException, HwcClientException404, HwcModule, @@ -434,8 +423,7 @@ def async_wait_create(config, result, client, timeout): path_parameters = { "subnet_id": ["subnet", "id"], } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} url = build_path(module, "subnets/{subnet_id}", data) @@ -532,8 +520,7 @@ def async_wait_update(config, result, client, timeout): path_parameters = { "subnet_id": ["subnet", "id"], } - data = dict((key, navigate_value(result, path)) - for key, path in path_parameters.items()) + data = {key: navigate_value(result, path) for key, path in path_parameters.items()} url = build_path(module, "subnets/{subnet_id}", data) diff --git a/plugins/modules/ibm_sa_domain.py b/plugins/modules/ibm_sa_domain.py index 26e4b7e47c..f377bce761 100644 --- a/plugins/modules/ibm_sa_domain.py +++ b/plugins/modules/ibm_sa_domain.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2018, IBM CORPORATION # Author(s): Tzur Eliyahu @@ -7,89 +6,92 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ibm_sa_domain short_description: Manages domains on IBM Spectrum Accelerate Family storage systems description: - 
- "This module can be used to add domains to or removes them from IBM Spectrum Accelerate Family storage systems." + - This module can be used to add domains to or removes them from IBM Spectrum Accelerate Family storage systems. +attributes: + check_mode: + support: none + diff_mode: + support: none options: - domain: - description: - - Name of the domain to be managed. - required: true - type: str - state: - description: - - The desired state of the domain. - default: "present" - choices: [ "present", "absent" ] - type: str - ldap_id: - description: - - ldap id to add to the domain. - required: false - type: str - size: - description: - - Size of the domain. - required: false - type: str - hard_capacity: - description: - - Hard capacity of the domain. - required: false - type: str - soft_capacity: - description: - - Soft capacity of the domain. - required: false - type: str - max_cgs: - description: - - Number of max cgs. - required: false - type: str - max_dms: - description: - - Number of max dms. - required: false - type: str - max_mirrors: - description: - - Number of max_mirrors. - required: false - type: str - max_pools: - description: - - Number of max_pools. - required: false - type: str - max_volumes: - description: - - Number of max_volumes. - required: false - type: str - perf_class: - description: - - Add the domain to a performance class. - required: false - type: str + domain: + description: + - Name of the domain to be managed. + required: true + type: str + state: + description: + - The desired state of the domain. + default: "present" + choices: ["present", "absent"] + type: str + ldap_id: + description: + - LDAP ID to add to the domain. + required: false + type: str + size: + description: + - Size of the domain. + required: false + type: str + hard_capacity: + description: + - Hard capacity of the domain. + required: false + type: str + soft_capacity: + description: + - Soft capacity of the domain. 
+ required: false + type: str + max_cgs: + description: + - Number of max cgs. + required: false + type: str + max_dms: + description: + - Number of max dms. + required: false + type: str + max_mirrors: + description: + - Number of max_mirrors. + required: false + type: str + max_pools: + description: + - Number of max_pools. + required: false + type: str + max_volumes: + description: + - Number of max_volumes. + required: false + type: str + perf_class: + description: + - Add the domain to a performance class. + required: false + type: str extends_documentation_fragment: -- community.general.ibm_storage - + - community.general.ibm_storage + - community.general.attributes author: - - Tzur Eliyahu (@tzure) -''' + - Tzur Eliyahu (@tzure) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Define new domain. community.general.ibm_sa_domain: domain: domain_name @@ -106,14 +108,14 @@ EXAMPLES = ''' username: admin password: secret endpoints: hostdev-system -''' -RETURN = ''' +""" +RETURN = r""" msg: - description: module return status. - returned: as needed - type: str - sample: "domain 'domain_name' created successfully." -''' + description: Module return status. + returned: as needed + type: str + sample: "domain 'domain_name' created successfully." 
+""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ diff --git a/plugins/modules/ibm_sa_host.py b/plugins/modules/ibm_sa_host.py index 961e1bba19..17615390f0 100644 --- a/plugins/modules/ibm_sa_host.py +++ b/plugins/modules/ibm_sa_host.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (C) 2018 IBM CORPORATION # Author(s): Tzur Eliyahu @@ -7,63 +6,63 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ibm_sa_host short_description: Adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems description: - - "This module adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems." + - This module adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems. +attributes: + check_mode: + support: none + diff_mode: + support: none options: - host: - description: - - Host name. - required: true - type: str - state: - description: - - Host state. - default: "present" - choices: [ "present", "absent" ] - type: str - cluster: - description: - - The name of the cluster to include the host. - required: false - type: str - domain: - description: - - The domains the cluster will be attached to. - To include more than one domain, - separate domain names with commas. - To include all existing domains, use an asterisk ("*"). 
- required: false - type: str - iscsi_chap_name: - description: - - The host's CHAP name identifier - required: false - type: str - iscsi_chap_secret: - description: - - The password of the initiator used to - authenticate to the system when CHAP is enable - required: false - type: str + host: + description: + - Host name. + required: true + type: str + state: + description: + - Host state. + default: "present" + choices: ["present", "absent"] + type: str + cluster: + description: + - The name of the cluster to include the host. + required: false + type: str + domain: + description: + - The domains the cluster is attached to. To include more than one domain, separate domain names with commas. To include + all existing domains, use an asterisk (V(*)). + required: false + type: str + iscsi_chap_name: + description: + - The host's CHAP name identifier. + required: false + type: str + iscsi_chap_secret: + description: + - The password of the initiator used to authenticate to the system when CHAP is enabled. + required: false + type: str extends_documentation_fragment: -- community.general.ibm_storage - + - community.general.ibm_storage + - community.general.attributes author: - - Tzur Eliyahu (@tzure) -''' + - Tzur Eliyahu (@tzure) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Define new host. 
community.general.ibm_sa_host: host: host_name @@ -79,9 +78,9 @@ EXAMPLES = ''' username: admin password: secret endpoints: hostdev-system -''' -RETURN = ''' -''' +""" +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ diff --git a/plugins/modules/ibm_sa_host_ports.py b/plugins/modules/ibm_sa_host_ports.py index fc543053a7..4c5b2b2d04 100644 --- a/plugins/modules/ibm_sa_host_ports.py +++ b/plugins/modules/ibm_sa_host_ports.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (C) 2018 IBM CORPORATION # Author(s): Tzur Eliyahu @@ -7,54 +6,57 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ibm_sa_host_ports short_description: Add host ports on IBM Spectrum Accelerate Family storage systems description: - - "This module adds ports to or removes them from the hosts - on IBM Spectrum Accelerate Family storage systems." + - This module adds ports to or removes them from the hosts on IBM Spectrum Accelerate Family storage systems. +attributes: + check_mode: + support: none + diff_mode: + support: none options: - host: - description: - - Host name. - required: true - type: str - state: - description: - - Host ports state. - default: "present" - choices: [ "present", "absent" ] - type: str - iscsi_name: - description: - - iSCSI initiator name. - required: false - type: str - fcaddress: - description: - - Fiber channel address. - required: false - type: str - num_of_visible_targets: - description: - - Number of visible targets. - required: false - type: str + host: + description: + - Host name. 
+ required: true + type: str + state: + description: + - Host ports state. + default: "present" + choices: ["present", "absent"] + type: str + iscsi_name: + description: + - The iSCSI initiator name. + required: false + type: str + fcaddress: + description: + - Fiber channel address. + required: false + type: str + num_of_visible_targets: + description: + - Number of visible targets. + required: false + type: str extends_documentation_fragment: -- community.general.ibm_storage + - community.general.ibm_storage + - community.general.attributes author: - - Tzur Eliyahu (@tzure) -''' + - Tzur Eliyahu (@tzure) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Add ports for host. community.general.ibm_sa_host_ports: host: test_host @@ -72,10 +74,9 @@ EXAMPLES = ''' password: secret endpoints: hostdev-system state: absent - -''' -RETURN = ''' -''' +""" +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command, connect_ssl, diff --git a/plugins/modules/ibm_sa_pool.py b/plugins/modules/ibm_sa_pool.py index 998f3f74be..bb7102fa71 100644 --- a/plugins/modules/ibm_sa_pool.py +++ b/plugins/modules/ibm_sa_pool.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (C) 2018 IBM CORPORATION # Author(s): Tzur Eliyahu @@ -7,59 +6,62 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ibm_sa_pool short_description: Handles pools on IBM Spectrum Accelerate Family storage systems description: - - "This module creates or deletes pools to be used on IBM Spectrum Accelerate Family storage systems" + - This module creates or deletes pools to be used on IBM Spectrum 
Accelerate Family storage systems. +attributes: + check_mode: + support: none + diff_mode: + support: none options: - pool: - description: - - Pool name. - required: true - type: str - state: - description: - - Pool state. - default: "present" - choices: [ "present", "absent" ] - type: str - size: - description: - - Pool size in GB - required: false - type: str - snapshot_size: - description: - - Pool snapshot size in GB - required: false - type: str - domain: - description: - - Adds the pool to the specified domain. - required: false - type: str - perf_class: - description: - - Assigns a perf_class to the pool. - required: false - type: str + pool: + description: + - Pool name. + required: true + type: str + state: + description: + - Pool state. + default: "present" + choices: ["present", "absent"] + type: str + size: + description: + - Pool size in GB. + required: false + type: str + snapshot_size: + description: + - Pool snapshot size in GB. + required: false + type: str + domain: + description: + - Adds the pool to the specified domain. + required: false + type: str + perf_class: + description: + - Assigns a perf_class to the pool. + required: false + type: str extends_documentation_fragment: -- community.general.ibm_storage - + - community.general.ibm_storage + - community.general.attributes author: - - Tzur Eliyahu (@tzure) -''' + - Tzur Eliyahu (@tzure) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create new pool. 
community.general.ibm_sa_pool: name: pool_name @@ -76,9 +78,9 @@ EXAMPLES = ''' username: admin password: secret endpoints: hostdev-system -''' -RETURN = ''' -''' +""" +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ diff --git a/plugins/modules/ibm_sa_vol.py b/plugins/modules/ibm_sa_vol.py index 115ac9169f..48450084e2 100644 --- a/plugins/modules/ibm_sa_vol.py +++ b/plugins/modules/ibm_sa_vol.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (C) 2018 IBM CORPORATION # Author(s): Tzur Eliyahu @@ -7,49 +6,52 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ibm_sa_vol short_description: Handle volumes on IBM Spectrum Accelerate Family storage systems description: - - "This module creates or deletes volumes to be used on IBM Spectrum Accelerate Family storage systems." + - This module creates or deletes volumes to be used on IBM Spectrum Accelerate Family storage systems. +attributes: + check_mode: + support: none + diff_mode: + support: none options: - vol: - description: - - Volume name. - required: true - type: str - pool: - description: - - Volume pool. - required: false - type: str - state: - description: - - Volume state. - default: "present" - choices: [ "present", "absent" ] - type: str - size: - description: - - Volume size. - required: false - type: str + vol: + description: + - Volume name. + required: true + type: str + pool: + description: + - Volume pool. + required: false + type: str + state: + description: + - Volume state. 
+ default: "present" + choices: ["present", "absent"] + type: str + size: + description: + - Volume size. + required: false + type: str extends_documentation_fragment: -- community.general.ibm_storage - + - community.general.ibm_storage + - community.general.attributes author: - - Tzur Eliyahu (@tzure) -''' + - Tzur Eliyahu (@tzure) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Create a new volume. community.general.ibm_sa_vol: vol: volume_name @@ -67,9 +69,9 @@ EXAMPLES = ''' username: admin password: secret endpoints: hostdev-system -''' -RETURN = ''' -''' +""" +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import execute_pyxcli_command, \ diff --git a/plugins/modules/ibm_sa_vol_map.py b/plugins/modules/ibm_sa_vol_map.py index f493a2d979..03c87ca37b 100644 --- a/plugins/modules/ibm_sa_vol_map.py +++ b/plugins/modules/ibm_sa_vol_map.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (C) 2018 IBM CORPORATION # Author(s): Tzur Eliyahu @@ -7,62 +6,63 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ibm_sa_vol_map short_description: Handles volume mapping on IBM Spectrum Accelerate Family storage systems description: - - "This module maps volumes to or unmaps them from the hosts on - IBM Spectrum Accelerate Family storage systems." + - This module maps volumes to or unmaps them from the hosts on IBM Spectrum Accelerate Family storage systems. +attributes: + check_mode: + support: none + diff_mode: + support: none options: - vol: - description: - - Volume name. 
- required: true - type: str - state: - default: "present" - choices: [ "present", "absent" ] - description: - - When the state is present the volume is mapped. - When the state is absent, the volume is meant to be unmapped. - type: str + vol: + description: + - Volume name. + required: true + type: str + state: + default: "present" + choices: ["present", "absent"] + description: + - When the state is present the volume is mapped. When the state is absent, the volume is meant to be unmapped. + type: str - cluster: - description: - - Maps the volume to a cluster. - required: false - type: str - host: - description: - - Maps the volume to a host. - required: false - type: str - lun: - description: - - The LUN identifier. - required: false - type: str - override: - description: - - Overrides the existing volume mapping. - required: false - type: str + cluster: + description: + - Maps the volume to a cluster. + required: false + type: str + host: + description: + - Maps the volume to a host. + required: false + type: str + lun: + description: + - The LUN identifier. + required: false + type: str + override: + description: + - Overrides the existing volume mapping. + required: false + type: str extends_documentation_fragment: -- community.general.ibm_storage - + - community.general.ibm_storage + - community.general.attributes author: - - Tzur Eliyahu (@tzure) -''' + - Tzur Eliyahu (@tzure) +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Map volume to host. 
community.general.ibm_sa_vol_map: vol: volume_name @@ -90,9 +90,9 @@ EXAMPLES = ''' password: secret endpoints: hostdev-system state: absent -''' -RETURN = ''' -''' +""" +RETURN = r""" +""" from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ibm_sa_utils import (execute_pyxcli_command, diff --git a/plugins/modules/icinga2_feature.py b/plugins/modules/icinga2_feature.py index 2f1d5629d5..6899fe2e23 100644 --- a/plugins/modules/icinga2_feature.py +++ b/plugins/modules/icinga2_feature.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2016, Loic Blot # Copyright (c) 2018, Ansible Project @@ -9,36 +8,41 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: icinga2_feature short_description: Manage Icinga2 feature description: - - This module can be used to enable or disable an Icinga2 feature. + - This module can be used to enable or disable an Icinga2 feature. author: "Loic Blot (@nerzhul)" +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: - name: - type: str - description: + name: + type: str + description: - This is the feature name to enable or disable. - required: true - state: - type: str - description: - - If set to C(present) and feature is disabled, then feature is enabled. - - If set to C(present) and feature is already enabled, then nothing is changed. - - If set to C(absent) and feature is enabled, then feature is disabled. - - If set to C(absent) and feature is already disabled, then nothing is changed. 
- choices: [ "present", "absent" ] - default: present -''' + required: true + state: + type: str + description: + - If set to V(present) and feature is disabled, then feature is enabled. + - If set to V(present) and feature is already enabled, then nothing is changed. + - If set to V(absent) and feature is enabled, then feature is disabled. + - If set to V(absent) and feature is already disabled, then nothing is changed. + choices: ["present", "absent"] + default: present +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Enable ido-pgsql feature community.general.icinga2_feature: name: ido-pgsql @@ -48,11 +52,11 @@ EXAMPLES = ''' community.general.icinga2_feature: name: api state: absent -''' +""" -RETURN = ''' +RETURN = r""" # -''' +""" import re from ansible.module_utils.basic import AnsibleModule diff --git a/plugins/modules/icinga2_host.py b/plugins/modules/icinga2_host.py index 0f4e2b26a0..39a7b48a6d 100644 --- a/plugins/modules/icinga2_host.py +++ b/plugins/modules/icinga2_host.py @@ -1,5 +1,4 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # This module is proudly sponsored by CGI (www.cgi.com) and # KPN (www.kpn.com). @@ -7,70 +6,69 @@ # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: icinga2_host short_description: Manage a host in Icinga2 description: - - "Add or remove a host to Icinga2 through the API." - - "See U(https://www.icinga.com/docs/icinga2/latest/doc/12-icinga2-api/)" + - Add or remove a host to Icinga2 through the API. + - See U(https://www.icinga.com/docs/icinga2/latest/doc/12-icinga2-api/). 
author: "Jurgen Brand (@t794104)" +attributes: + check_mode: + support: full + diff_mode: + support: none options: url: type: str description: - - HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path + - HTTP, HTTPS, or FTP URL in the form V((http|https|ftp\)://[user[:pass]]@host.domain[:port]/path). use_proxy: description: - - If C(false), it will not use a proxy, even if one is defined in - an environment variable on the target hosts. + - If V(false), it does not use a proxy, even if one is defined in an environment variable on the target hosts. type: bool default: true validate_certs: description: - - If C(false), SSL certificates will not be validated. This should only be used - on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates are not validated. This should only be used on personally controlled sites using self-signed + certificates. type: bool default: true url_username: type: str description: - The username for use in HTTP basic authentication. - - This parameter can be used without C(url_password) for sites that allow empty passwords. + - This parameter can be used without O(url_password) for sites that allow empty passwords. url_password: type: str description: - - The password for use in HTTP basic authentication. - - If the C(url_username) parameter is not specified, the C(url_password) parameter will not be used. + - The password for use in HTTP basic authentication. + - If the O(url_username) parameter is not specified, the O(url_password) parameter is not used. force_basic_auth: description: - - httplib2, the library used by the uri module only sends authentication information when a webservice - responds to an initial request with a 401 status. Since some basic auth services do not properly - send a 401, logins will fail. This option forces the sending of the Basic authentication header - upon initial request. 
+ - C(httplib2), the library used by Ansible's HTTP request code only sends authentication information when a webservice + responds to an initial request with a 401 status. Since some basic auth services do not properly send a 401, logins + may fail. This option forces the sending of the Basic authentication header upon initial request. type: bool default: false client_cert: type: path description: - - PEM formatted certificate chain file to be used for SSL client - authentication. This file can also include the key as well, and if - the key is included, C(client_key) is not required. + - PEM formatted certificate chain file to be used for SSL client authentication. This file can also include the key + as well, and if the key is included, O(client_key) is not required. client_key: type: path description: - - PEM formatted file that contains your private key to be used for SSL - client authentication. If C(client_cert) contains both the certificate - and key, this option is not required. + - PEM formatted file that contains your private key to be used for SSL client authentication. If O(client_cert) contains + both the certificate and key, this option is not required. state: type: str description: - Apply feature state. - choices: [ "present", "absent" ] + choices: ["present", "absent"] default: present name: type: str @@ -96,21 +94,22 @@ options: type: str description: - The name used to display the host. - - If not specified, it defaults to the value of the I(name) parameter. + - If not specified, it defaults to the value of the O(name) parameter. ip: type: str description: - The IP address of the host. - required: true + - This is no longer required since community.general 8.0.0. variables: type: dict description: - Dictionary of variables. 
extends_documentation_fragment: - - url -''' + - ansible.builtin.url + - community.general.attributes +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Add host to icinga community.general.icinga2_host: url: "https://icinga2.example.com" @@ -122,18 +121,18 @@ EXAMPLES = ''' variables: foo: "bar" delegate_to: 127.0.0.1 -''' +""" -RETURN = ''' +RETURN = r""" name: - description: The name used to create, modify or delete the host - type: str - returned: always + description: The name used to create, modify or delete the host. + type: str + returned: always data: - description: The data structure used for create, modify or delete of the host - type: dict - returned: always -''' + description: The data structure used for create, modify or delete of the host. + type: dict + returned: always +""" import json @@ -234,11 +233,11 @@ def main(): state=dict(default="present", choices=["absent", "present"]), name=dict(required=True, aliases=['host']), zone=dict(), - template=dict(default=None), + template=dict(), check_command=dict(default="hostalive"), - display_name=dict(default=None), - ip=dict(required=True), - variables=dict(type='dict', default=None), + display_name=dict(), + ip=dict(), + variables=dict(type='dict'), ) # Define the main module @@ -250,9 +249,9 @@ def main(): state = module.params["state"] name = module.params["name"] zone = module.params["zone"] - template = [name] + template = [] if module.params["template"]: - template.append(module.params["template"]) + template = [module.params["template"]] check_command = module.params["check_command"] ip = module.params["ip"] display_name = module.params["display_name"] @@ -267,20 +266,16 @@ def main(): module.fail_json(msg="unable to connect to Icinga. 
Exception message: %s" % (e)) data = { + 'templates': template, 'attrs': { 'address': ip, 'display_name': display_name, 'check_command': check_command, 'zone': zone, - 'vars': { - 'made_by': "ansible", - }, - 'templates': template, + 'vars.made_by': "ansible" } } - - if variables: - data['attrs']['vars'].update(variables) + data['attrs'].update({'vars.' + key: value for key, value in variables.items()}) changed = False if icinga.exists(name): @@ -302,7 +297,7 @@ def main(): module.exit_json(changed=False, name=name, data=data) # Template attribute is not allowed in modification - del data['attrs']['templates'] + del data['templates'] ret = icinga.modify(name, data) diff --git a/plugins/modules/idrac_redfish_command.py b/plugins/modules/idrac_redfish_command.py index 9e523b6d11..b60126764a 100644 --- a/plugins/modules/idrac_redfish_command.py +++ b/plugins/modules/idrac_redfish_command.py @@ -1,21 +1,25 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2018 Dell EMC Inc. # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: idrac_redfish_command short_description: Manages Out-Of-Band controllers using iDRAC OEM Redfish APIs description: - - Builds Redfish URIs locally and sends them to remote OOB controllers to - perform an action. + - Builds Redfish URIs locally and sends them to remote OOB controllers to perform an action. - For use with Dell iDRAC operations that require Redfish OEM extensions. +extends_documentation_fragment: + - community.general.attributes + - community.general.redfish +attributes: + check_mode: + support: none + diff_mode: + support: none options: category: required: true @@ -57,32 +61,44 @@ options: - ID of the System, Manager or Chassis to modify. 
type: str version_added: '0.2.0' + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + ciphers: + version_added: 10.6.0 author: "Jose Delarosa (@jose-delarosa)" -''' +""" -EXAMPLES = ''' - - name: Create BIOS configuration job (schedule BIOS setting update) - community.general.idrac_redfish_command: - category: Systems - command: CreateBiosConfigJob - resource_id: System.Embedded.1 - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" -''' +EXAMPLES = r""" +- name: Create BIOS configuration job (schedule BIOS setting update) + community.general.idrac_redfish_command: + category: Systems + command: CreateBiosConfigJob + resource_id: System.Embedded.1 + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" +""" -RETURN = ''' +RETURN = r""" msg: - description: Message with action result or error description - returned: always - type: str - sample: "Action was successful" -''' + description: Message with action result or error description. + returned: always + type: str + sample: "Action was successful" +return_values: + description: Dictionary containing command-specific response data from the action. 
+ returned: on success + type: dict + version_added: 6.6.0 + sample: {"job_id": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_471269252011"} +""" import re from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC from ansible.module_utils.common.text.converters import to_native @@ -121,10 +137,9 @@ class IdracRedfishUtils(RedfishUtils): return response response_output = response['resp'].__dict__ - job_id = response_output["headers"]["Location"] - job_id = re.search("JID_.+", job_id).group() - # Currently not passing job_id back to user but patch is coming - return {'ret': True, 'msg': "Config job %s created" % job_id} + job_id_full = response_output["headers"]["Location"] + job_id = re.search("JID_.+", job_id_full).group() + return {'ret': True, 'msg': "Config job %s created" % job_id, 'job_id': job_id_full} CATEGORY_COMMANDS_ALL = { @@ -136,17 +151,20 @@ CATEGORY_COMMANDS_ALL = { def main(): result = {} + return_values = {} + argument_spec = dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + timeout=dict(type='int', default=10), + resource_id=dict() + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( - argument_spec=dict( - category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - timeout=dict(type='int', default=10), - resource_id=dict() - ), + argument_spec, required_together=[ ('username', 'password'), ], @@ -192,7 +210,20 @@ def main(): if category == "Systems": # execute only if we find a System resource + # 
NOTE: Currently overriding the usage of 'data_modification' due to + # how 'resource_id' is processed. In the case of CreateBiosConfigJob, + # we interact with BOTH systems and managers, so you currently cannot + # specify a single 'resource_id' to make both '_find_systems_resource' + # and '_find_managers_resource' return success. Since + # CreateBiosConfigJob doesn't use the matched 'resource_id' for a + # system regardless of what's specified, disabling the 'resource_id' + # inspection for the next call allows a specific manager to be + # specified with 'resource_id'. If we ever need to expand the input + # to inspect a specific system and manager in parallel, this will need + # updates. + rf_utils.data_modification = False result = rf_utils._find_systems_resource() + rf_utils.data_modification = True if result['ret'] is False: module.fail_json(msg=to_native(result['msg'])) @@ -203,11 +234,13 @@ def main(): if result['ret'] is False: module.fail_json(msg=to_native(result['msg'])) result = rf_utils.create_bios_config_job() + if 'job_id' in result: + return_values['job_id'] = result['job_id'] # Return data back or fail with proper message if result['ret'] is True: del result['ret'] - module.exit_json(changed=True, msg='Action was successful') + module.exit_json(changed=True, msg='Action was successful', return_values=return_values) else: module.fail_json(msg=to_native(result['msg'])) diff --git a/plugins/modules/idrac_redfish_config.py b/plugins/modules/idrac_redfish_config.py index f995258c7d..e7d6250624 100644 --- a/plugins/modules/idrac_redfish_config.py +++ b/plugins/modules/idrac_redfish_config.py @@ -1,21 +1,25 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2019 Dell EMC Inc. 
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: idrac_redfish_config short_description: Manages servers through iDRAC using Dell Redfish APIs description: - - For use with Dell iDRAC operations that require Redfish OEM extensions - - Builds Redfish URIs locally and sends them to remote iDRAC controllers to - set or update a configuration attribute. + - For use with Dell iDRAC operations that require Redfish OEM extensions. + - Builds Redfish URIs locally and sends them to remote iDRAC controllers to set or update a configuration attribute. +extends_documentation_fragment: + - community.general.attributes + - community.general.redfish +attributes: + check_mode: + support: none + diff_mode: + support: none options: category: required: true @@ -26,9 +30,8 @@ options: required: true description: - List of commands to execute on iDRAC. - - I(SetManagerAttributes), I(SetLifecycleControllerAttributes) and - I(SetSystemAttributes) are mutually exclusive commands when C(category) - is I(Manager). + - V(SetManagerAttributes), V(SetLifecycleControllerAttributes) and V(SetSystemAttributes) are mutually exclusive commands + when O(category) is V(Manager). type: list elements: str baseuri: @@ -67,90 +70,96 @@ options: - ID of the System, Manager or Chassis to modify. 
type: str version_added: '0.2.0' + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + ciphers: + version_added: 10.6.0 author: "Jose Delarosa (@jose-delarosa)" -''' +""" -EXAMPLES = ''' - - name: Enable NTP and set NTP server and Time zone attributes in iDRAC - community.general.idrac_redfish_config: - category: Manager - command: SetManagerAttributes - resource_id: iDRAC.Embedded.1 - manager_attributes: - NTPConfigGroup.1.NTPEnable: "Enabled" - NTPConfigGroup.1.NTP1: "{{ ntpserver1 }}" - Time.1.Timezone: "{{ timezone }}" - baseuri: "{{ baseuri }}" - username: "{{ username}}" - password: "{{ password }}" +EXAMPLES = r""" +- name: Enable NTP and set NTP server and Time zone attributes in iDRAC + community.general.idrac_redfish_config: + category: Manager + command: SetManagerAttributes + resource_id: iDRAC.Embedded.1 + manager_attributes: + NTPConfigGroup.1.NTPEnable: "Enabled" + NTPConfigGroup.1.NTP1: "{{ ntpserver1 }}" + Time.1.Timezone: "{{ timezone }}" + baseuri: "{{ baseuri }}" + username: "{{ username}}" + password: "{{ password }}" - - name: Enable Syslog and set Syslog servers in iDRAC - community.general.idrac_redfish_config: - category: Manager - command: SetManagerAttributes - resource_id: iDRAC.Embedded.1 - manager_attributes: - SysLog.1.SysLogEnable: "Enabled" - SysLog.1.Server1: "{{ syslog_server1 }}" - SysLog.1.Server2: "{{ syslog_server2 }}" - baseuri: "{{ baseuri }}" - username: "{{ username}}" - password: "{{ password }}" +- name: Enable Syslog and set Syslog servers in iDRAC + community.general.idrac_redfish_config: + category: Manager + command: SetManagerAttributes + resource_id: iDRAC.Embedded.1 + manager_attributes: + SysLog.1.SysLogEnable: "Enabled" + SysLog.1.Server1: "{{ syslog_server1 }}" + SysLog.1.Server2: "{{ syslog_server2 }}" + baseuri: "{{ baseuri }}" + username: "{{ username}}" + password: "{{ password }}" - - name: Configure SNMP community string, port, protocol and trap format - 
community.general.idrac_redfish_config: - category: Manager - command: SetManagerAttributes - resource_id: iDRAC.Embedded.1 - manager_attributes: - SNMP.1.AgentEnable: "Enabled" - SNMP.1.AgentCommunity: "public_community_string" - SNMP.1.TrapFormat: "SNMPv1" - SNMP.1.SNMPProtocol: "All" - SNMP.1.DiscoveryPort: 161 - SNMP.1.AlertPort: 162 - baseuri: "{{ baseuri }}" - username: "{{ username}}" - password: "{{ password }}" +- name: Configure SNMP community string, port, protocol and trap format + community.general.idrac_redfish_config: + category: Manager + command: SetManagerAttributes + resource_id: iDRAC.Embedded.1 + manager_attributes: + SNMP.1.AgentEnable: "Enabled" + SNMP.1.AgentCommunity: "public_community_string" + SNMP.1.TrapFormat: "SNMPv1" + SNMP.1.SNMPProtocol: "All" + SNMP.1.DiscoveryPort: 161 + SNMP.1.AlertPort: 162 + baseuri: "{{ baseuri }}" + username: "{{ username}}" + password: "{{ password }}" - - name: Enable CSIOR - community.general.idrac_redfish_config: - category: Manager - command: SetLifecycleControllerAttributes - resource_id: iDRAC.Embedded.1 - manager_attributes: - LCAttributes.1.CollectSystemInventoryOnRestart: "Enabled" - baseuri: "{{ baseuri }}" - username: "{{ username}}" - password: "{{ password }}" +- name: Enable CSIOR + community.general.idrac_redfish_config: + category: Manager + command: SetLifecycleControllerAttributes + resource_id: iDRAC.Embedded.1 + manager_attributes: + LCAttributes.1.CollectSystemInventoryOnRestart: "Enabled" + baseuri: "{{ baseuri }}" + username: "{{ username}}" + password: "{{ password }}" - - name: Set Power Supply Redundancy Policy to A/B Grid Redundant - community.general.idrac_redfish_config: - category: Manager - command: SetSystemAttributes - resource_id: iDRAC.Embedded.1 - manager_attributes: - ServerPwr.1.PSRedPolicy: "A/B Grid Redundant" - baseuri: "{{ baseuri }}" - username: "{{ username}}" - password: "{{ password }}" -''' +- name: Set Power Supply Redundancy Policy to A/B Grid Redundant + 
community.general.idrac_redfish_config: + category: Manager + command: SetSystemAttributes + resource_id: iDRAC.Embedded.1 + manager_attributes: + ServerPwr.1.PSRedPolicy: "A/B Grid Redundant" + baseuri: "{{ baseuri }}" + username: "{{ username}}" + password: "{{ password }}" +""" -RETURN = ''' +RETURN = r""" msg: - description: Message with action result or error description - returned: always - type: str - sample: "Action was successful" -''' + description: Message with action result or error description. + returned: always + type: str + sample: "Action was successful" +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.validation import ( check_mutually_exclusive, check_required_arguments ) -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC from ansible.module_utils.common.text.converters import to_native @@ -196,14 +205,14 @@ class IdracRedfishUtils(RedfishUtils): for attr_name, attr_value in attributes.items(): # Check if attribute exists - if attr_name not in data[u'Attributes']: + if attr_name not in data['Attributes']: # Skip and proceed to next attribute if this isn't valid attrs_bad.update({attr_name: attr_value}) continue # Find out if value is already set to what we want. 
If yes, exclude # those attributes - if data[u'Attributes'][attr_name] == attr_value: + if data['Attributes'][attr_name] == attr_value: attrs_skipped.update({attr_name: attr_value}) else: attrs_to_patch.update({attr_name: attr_value}) @@ -242,18 +251,20 @@ CATEGORY_COMMANDS_MUTUALLY_EXCLUSIVE = { def main(): result = {} + argument_spec = dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + manager_attributes=dict(type='dict', default={}), + timeout=dict(type='int', default=10), + resource_id=dict() + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( - argument_spec=dict( - category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - manager_attributes=dict(type='dict', default={}), - timeout=dict(type='int', default=10), - resource_id=dict() - ), + argument_spec, required_together=[ ('username', 'password'), ], diff --git a/plugins/modules/idrac_redfish_info.py b/plugins/modules/idrac_redfish_info.py index cb44a75961..309cefc15f 100644 --- a/plugins/modules/idrac_redfish_info.py +++ b/plugins/modules/idrac_redfish_info.py @@ -1,26 +1,25 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2019 Dell EMC Inc. 
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: idrac_redfish_info short_description: Gather PowerEdge server information through iDRAC using Redfish APIs description: - - Builds Redfish URIs locally and sends them to remote iDRAC controllers to - get information back. + - Builds Redfish URIs locally and sends them to remote iDRAC controllers to get information back. - For use with Dell EMC iDRAC operations that require Redfish OEM extensions. - - This module was called C(idrac_redfish_facts) before Ansible 2.9, returning C(ansible_facts). - Note that the M(community.general.idrac_redfish_info) module no longer returns C(ansible_facts)! extends_documentation_fragment: - community.general.attributes - community.general.attributes.info_module + - community.general.redfish +attributes: + check_mode: + version_added: 3.3.0 + # This was backported to 2.5.4 and 1.3.11 as well, since this was a bugfix options: category: required: true @@ -31,8 +30,7 @@ options: required: true description: - List of commands to execute on iDRAC. - - C(GetManagerAttributes) returns the list of dicts containing iDRAC, - LifecycleController and System attributes. + - V(GetManagerAttributes) returns the list of dicts containing iDRAC, LifecycleController and System attributes. type: list elements: str baseuri: @@ -58,72 +56,80 @@ options: - Timeout in seconds for HTTP requests to iDRAC. 
default: 10 type: int + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + ciphers: + version_added: 10.6.0 author: "Jose Delarosa (@jose-delarosa)" -''' +""" -EXAMPLES = ''' - - name: Get Manager attributes with a default of 20 seconds - community.general.idrac_redfish_info: - category: Manager - command: GetManagerAttributes - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - timeout: 20 - register: result +EXAMPLES = r""" +- name: Get Manager attributes with a default of 20 seconds + community.general.idrac_redfish_info: + category: Manager + command: GetManagerAttributes + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + timeout: 20 + register: result - # Examples to display the value of all or a single iDRAC attribute - - name: Store iDRAC attributes as a fact variable - ansible.builtin.set_fact: - idrac_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'iDRACAttributes') | list | first }}" +# Examples to display the value of all or a single iDRAC attribute +- name: Store iDRAC attributes as a fact variable + ansible.builtin.set_fact: + idrac_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'iDRACAttributes') + | list | first }}" - - name: Display all iDRAC attributes - ansible.builtin.debug: - var: idrac_attributes +- name: Display all iDRAC attributes + ansible.builtin.debug: + var: idrac_attributes - - name: Display the value of 'Syslog.1.SysLogEnable' iDRAC attribute - ansible.builtin.debug: - var: idrac_attributes['Syslog.1.SysLogEnable'] +- name: Display the value of 'Syslog.1.SysLogEnable' iDRAC attribute + ansible.builtin.debug: + var: idrac_attributes['Syslog.1.SysLogEnable'] - # Examples to display the value of all or a single LifecycleController attribute - - name: Store LifecycleController attributes as a fact variable - 
ansible.builtin.set_fact: - lc_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'LCAttributes') | list | first }}" +# Examples to display the value of all or a single LifecycleController attribute +- name: Store LifecycleController attributes as a fact variable + ansible.builtin.set_fact: + lc_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'LCAttributes') + | list | first }}" - - name: Display LifecycleController attributes - ansible.builtin.debug: - var: lc_attributes +- name: Display LifecycleController attributes + ansible.builtin.debug: + var: lc_attributes - - name: Display the value of 'CollectSystemInventoryOnRestart' attribute - ansible.builtin.debug: - var: lc_attributes['LCAttributes.1.CollectSystemInventoryOnRestart'] +- name: Display the value of 'CollectSystemInventoryOnRestart' attribute + ansible.builtin.debug: + var: lc_attributes['LCAttributes.1.CollectSystemInventoryOnRestart'] - # Examples to display the value of all or a single System attribute - - name: Store System attributes as a fact variable - ansible.builtin.set_fact: - system_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'SystemAttributes') | list | first }}" +# Examples to display the value of all or a single System attribute +- name: Store System attributes as a fact variable + ansible.builtin.set_fact: + system_attributes: "{{ result.redfish_facts.entries | selectattr('Id', 'defined') | selectattr('Id', 'equalto', 'SystemAttributes') + | list | first }}" - - name: Display System attributes - ansible.builtin.debug: - var: system_attributes +- name: Display System attributes + ansible.builtin.debug: + var: system_attributes - - name: Display the value of 'PSRedPolicy' - ansible.builtin.debug: - var: system_attributes['ServerPwr.1.PSRedPolicy'] +- name: Display the value of 'PSRedPolicy' + ansible.builtin.debug: + var: 
system_attributes['ServerPwr.1.PSRedPolicy'] +""" -''' - -RETURN = ''' +RETURN = r""" msg: - description: different results depending on task - returned: always - type: dict - sample: List of Manager attributes -''' + description: Different results depending on task. + returned: always + type: dict + sample: List of Manager attributes +""" from ansible.module_utils.basic import AnsibleModule -from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils +from ansible_collections.community.general.plugins.module_utils.redfish_utils import RedfishUtils, REDFISH_COMMON_ARGUMENT_SPEC from ansible.module_utils.common.text.converters import to_native @@ -143,8 +149,8 @@ class IdracRedfishUtils(RedfishUtils): # Manager attributes are supported as part of iDRAC OEM extension # Attributes are supported only on iDRAC9 try: - for members in data[u'Links'][u'Oem'][u'Dell'][u'DellAttributes']: - attributes_uri = members[u'@odata.id'] + for members in data['Links']['Oem']['Dell']['DellAttributes']: + attributes_uri = members['@odata.id'] response = self.get_request(self.root_uri + attributes_uri) if response['ret'] is False: @@ -176,16 +182,18 @@ CATEGORY_COMMANDS_ALL = { def main(): result = {} + argument_spec = dict( + category=dict(required=True), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + timeout=dict(type='int', default=10) + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( - argument_spec=dict( - category=dict(required=True), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - timeout=dict(type='int', default=10) - ), + argument_spec, required_together=[ ('username', 'password'), ], diff --git a/plugins/modules/ilo_redfish_command.py 
b/plugins/modules/ilo_redfish_command.py new file mode 100644 index 0000000000..7f20a45631 --- /dev/null +++ b/plugins/modules/ilo_redfish_command.py @@ -0,0 +1,181 @@ +#!/usr/bin/python +# Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved. +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later +from __future__ import annotations + +DOCUMENTATION = r""" +module: ilo_redfish_command +short_description: Manages Out-Of-Band controllers using Redfish APIs +version_added: 6.6.0 +description: + - Builds Redfish URIs locally and sends them to remote OOB controllers to perform an action. +attributes: + check_mode: + support: none + diff_mode: + support: none +extends_documentation_fragment: + - community.general.attributes + - community.general.redfish +options: + category: + required: true + description: + - Category to execute on OOB controller. + type: str + choices: ['Systems'] + command: + required: true + description: + - List of commands to execute on OOB controller. + type: list + elements: str + baseuri: + required: true + description: + - Base URI of OOB controller. + type: str + username: + required: false + description: + - Username for authenticating to iLO. + type: str + password: + required: false + description: + - Password for authenticating to iLO. + type: str + auth_token: + required: false + description: + - Security token for authenticating to iLO. + type: str + timeout: + required: false + description: + - Timeout in seconds for HTTP requests to iLO. 
+ default: 60 + type: int + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + ciphers: + version_added: 10.6.0 +author: + - Varni H P (@varini-hp) +""" + +EXAMPLES = r""" +- name: Wait for iLO Reboot Completion + community.general.ilo_redfish_command: + category: Systems + command: WaitforiLORebootCompletion + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" +""" + +RETURN = r""" +ilo_redfish_command: + description: Returns the status of the operation performed on the iLO. + type: dict + contains: + WaitforiLORebootCompletion: + description: Returns the output msg and whether the function executed successfully. + type: dict + contains: + ret: + description: Return V(true)/V(false) based on whether the operation was performed successfully. + type: bool + msg: + description: Status of the operation performed on the iLO. + type: str + returned: always +""" + +# More will be added as module features are expanded +CATEGORY_COMMANDS_ALL = { + "Systems": ["WaitforiLORebootCompletion"] +} + +from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils +from ansible_collections.community.general.plugins.module_utils.redfish_utils import REDFISH_COMMON_ARGUMENT_SPEC +from ansible.module_utils.basic import AnsibleModule +from ansible.module_utils.common.text.converters import to_native + + +def main(): + result = {} + argument_spec = dict( + category=dict(required=True, choices=list(CATEGORY_COMMANDS_ALL.keys())), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + timeout=dict(type="int", default=60), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True) + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) + module = AnsibleModule( + argument_spec, + required_together=[ + ('username', 'password'), + ], + required_one_of=[ + ('username', 'auth_token'), + ], + mutually_exclusive=[ + ('username', 
'auth_token'), + ], + supports_check_mode=False + ) + + category = module.params['category'] + command_list = module.params['command'] + + # admin credentials used for authentication + creds = {'user': module.params['username'], + 'pswd': module.params['password'], + 'token': module.params['auth_token']} + + timeout = module.params['timeout'] + + # Build root URI + root_uri = "https://" + module.params['baseuri'] + rf_utils = iLORedfishUtils(creds, root_uri, timeout, module) + + # Check that Category is valid + if category not in CATEGORY_COMMANDS_ALL: + module.fail_json(msg=to_native( + "Invalid Category '%s'. Valid Categories = %s" % (category, list(CATEGORY_COMMANDS_ALL.keys())))) + + # Check that all commands are valid + for cmd in command_list: + # Fail if even one command given is invalid + if cmd not in CATEGORY_COMMANDS_ALL[category]: + module.fail_json( + msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category]))) + + if category == "Systems": + # execute only if we find a System resource + + result = rf_utils._find_systems_resource() + if result['ret'] is False: + module.fail_json(msg=to_native(result['msg'])) + + for command in command_list: + if command == "WaitforiLORebootCompletion": + result[command] = rf_utils.wait_for_ilo_reboot_completion() + + # Return data back or fail with proper message + if not result[command]['ret']: + module.fail_json(msg=result) + + changed = result[command].get('changed', False) + module.exit_json(ilo_redfish_command=result, changed=changed) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/ilo_redfish_config.py b/plugins/modules/ilo_redfish_config.py index 1c68127fa4..5cd441827f 100644 --- a/plugins/modules/ilo_redfish_config.py +++ b/plugins/modules/ilo_redfish_config.py @@ -1,20 +1,24 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved. 
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ilo_redfish_config short_description: Sets or updates configuration attributes on HPE iLO with Redfish OEM extensions version_added: 4.2.0 description: - - Builds Redfish URIs locally and sends them to iLO to - set or update a configuration attribute. + - Builds Redfish URIs locally and sends them to iLO to set or update a configuration attribute. - For use with HPE iLO operations that require Redfish OEM extensions. +extends_documentation_fragment: + - community.general.attributes + - community.general.redfish +attributes: + check_mode: + support: none + diff_mode: + support: none options: category: required: true @@ -60,74 +64,82 @@ options: description: - Value of the attribute to be configured. 
type: str + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + ciphers: + version_added: 10.6.0 author: - - "Bhavya B (@bhavya06)" -''' + - "Bhavya B (@bhavya06)" +""" -EXAMPLES = ''' - - name: Disable WINS Registration - community.general.ilo_redfish_config: - category: Manager - command: SetWINSReg - baseuri: 15.X.X.X - username: Admin - password: Testpass123 - attribute_name: WINSRegistration +EXAMPLES = r""" +- name: Disable WINS Registration + community.general.ilo_redfish_config: + category: Manager + command: SetWINSReg + baseuri: 15.X.X.X + username: Admin + password: Testpass123 + attribute_name: WINSRegistration - - name: Set Time Zone - community.general.ilo_redfish_config: - category: Manager - command: SetTimeZone - baseuri: 15.X.X.X - username: Admin - password: Testpass123 - attribute_name: TimeZone - attribute_value: Chennai +- name: Set Time Zone + community.general.ilo_redfish_config: + category: Manager + command: SetTimeZone + baseuri: 15.X.X.X + username: Admin + password: Testpass123 + attribute_name: TimeZone + attribute_value: Chennai - - name: Set NTP Servers - community.general.ilo_redfish_config: - category: Manager - command: SetNTPServers - baseuri: 15.X.X.X - username: Admin - password: Testpass123 - attribute_name: StaticNTPServers - attribute_value: X.X.X.X +- name: Set NTP Servers + community.general.ilo_redfish_config: + category: Manager + command: SetNTPServers + baseuri: 15.X.X.X + username: Admin + password: Testpass123 + attribute_name: StaticNTPServers + attribute_value: X.X.X.X +""" -''' - -RETURN = ''' +RETURN = r""" msg: - description: Message with action result or error description - returned: always - type: str - sample: "Action was successful" -''' + description: Message with action result or error description. 
+ returned: always + type: str + sample: "Action was successful" +""" CATEGORY_COMMANDS_ALL = { "Manager": ["SetTimeZone", "SetDNSserver", "SetDomainName", "SetNTPServers", "SetWINSReg"] } from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils +from ansible_collections.community.general.plugins.module_utils.redfish_utils import REDFISH_COMMON_ARGUMENT_SPEC from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native def main(): result = {} + argument_spec = dict( + category=dict(required=True, choices=list( + CATEGORY_COMMANDS_ALL.keys())), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + attribute_name=dict(required=True), + attribute_value=dict(type='str'), + timeout=dict(type='int', default=10) + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( - argument_spec=dict( - category=dict(required=True, choices=list( - CATEGORY_COMMANDS_ALL.keys())), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - attribute_name=dict(required=True), - attribute_value=dict(type='str'), - timeout=dict(type='int', default=10) - ), + argument_spec, required_together=[ ('username', 'password'), ], diff --git a/plugins/modules/ilo_redfish_info.py b/plugins/modules/ilo_redfish_info.py index 90cafb8ec6..6eb7d7b3f4 100644 --- a/plugins/modules/ilo_redfish_info.py +++ b/plugins/modules/ilo_redfish_info.py @@ -1,23 +1,20 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2021-2022 Hewlett Packard Enterprise, Inc. All rights reserved. 
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: ilo_redfish_info short_description: Gathers server information through iLO using Redfish APIs version_added: 4.2.0 description: - - Builds Redfish URIs locally and sends them to iLO to - get information back. + - Builds Redfish URIs locally and sends them to iLO to get information back. - For use with HPE iLO operations that require Redfish OEM extensions. extends_documentation_fragment: - community.general.attributes - community.general.attributes.info_module + - community.general.redfish options: category: required: true @@ -53,52 +50,58 @@ options: - Timeout in seconds for HTTP requests to iLO. default: 10 type: int + validate_certs: + version_added: 10.6.0 + ca_path: + version_added: 10.6.0 + ciphers: + version_added: 10.6.0 author: - - "Bhavya B (@bhavya06)" -''' + - "Bhavya B (@bhavya06)" +""" -EXAMPLES = ''' - - name: Get iLO Sessions - community.general.ilo_redfish_info: - category: Sessions - command: GetiLOSessions - baseuri: "{{ baseuri }}" - username: "{{ username }}" - password: "{{ password }}" - register: result_sessions -''' +EXAMPLES = r""" +- name: Get iLO Sessions + community.general.ilo_redfish_info: + category: Sessions + command: GetiLOSessions + baseuri: "{{ baseuri }}" + username: "{{ username }}" + password: "{{ password }}" + register: result_sessions +""" -RETURN = ''' +RETURN = r""" ilo_redfish_info: - description: Returns iLO sessions. - type: dict - contains: - GetiLOSessions: - description: Returns the iLO session msg and whether the function executed successfully. - type: dict - contains: - ret: - description: Check variable to see if the information was successfully retrieved. 
- type: bool - msg: - description: Information of all active iLO sessions. - type: list - elements: dict - contains: - Description: - description: Provides a description of the resource. - type: str - Id: - description: The sessionId. - type: str - Name: - description: The name of the resource. - type: str - UserName: - description: Name to use to log in to the management processor. - type: str - returned: always -''' + description: Returns iLO sessions. + type: dict + contains: + GetiLOSessions: + description: Returns the iLO session msg and whether the function executed successfully. + type: dict + contains: + ret: + description: Check variable to see if the information was successfully retrieved. + type: bool + msg: + description: Information of all active iLO sessions. + type: list + elements: dict + contains: + Description: + description: Provides a description of the resource. + type: str + Id: + description: The sessionId. + type: str + Name: + description: The name of the resource. + type: str + UserName: + description: Name to use to log in to the management processor. 
+ type: str + returned: always +""" CATEGORY_COMMANDS_ALL = { "Sessions": ["GetiLOSessions"] @@ -110,21 +113,24 @@ CATEGORY_COMMANDS_DEFAULT = { from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.general.plugins.module_utils.ilo_redfish_utils import iLORedfishUtils +from ansible_collections.community.general.plugins.module_utils.redfish_utils import REDFISH_COMMON_ARGUMENT_SPEC def main(): result = {} category_list = [] + argument_spec = dict( + category=dict(required=True, type='list', elements='str'), + command=dict(required=True, type='list', elements='str'), + baseuri=dict(required=True), + username=dict(), + password=dict(no_log=True), + auth_token=dict(no_log=True), + timeout=dict(type='int', default=10) + ) + argument_spec.update(REDFISH_COMMON_ARGUMENT_SPEC) module = AnsibleModule( - argument_spec=dict( - category=dict(required=True, type='list', elements='str'), - command=dict(required=True, type='list', elements='str'), - baseuri=dict(required=True), - username=dict(), - password=dict(no_log=True), - auth_token=dict(no_log=True), - timeout=dict(type='int', default=10) - ), + argument_spec, required_together=[ ('username', 'password'), ], diff --git a/plugins/modules/imc_rest.py b/plugins/modules/imc_rest.py index 4d90bcc54d..ef543c62e0 100644 --- a/plugins/modules/imc_rest.py +++ b/plugins/modules/imc_rest.py @@ -1,99 +1,102 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017, Dag Wieers # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: imc_rest short_description: Manage Cisco IMC hardware through its REST API description: -- Provides direct access to the Cisco IMC REST API. 
-- Perform any configuration changes and actions that the Cisco IMC supports. -- More information about the IMC REST API is available from - U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html) + - Provides direct access to the Cisco IMC REST API. + - Perform any configuration changes and actions that the Cisco IMC supports. + - More information about the IMC REST API is available from + U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html). author: -- Dag Wieers (@dagwieers) + - Dag Wieers (@dagwieers) requirements: -- lxml -- xmljson >= 0.1.8 + - lxml + - xmljson >= 0.1.8 +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: hostname: description: - - IP Address or hostname of Cisco IMC, resolvable by Ansible control host. + - IP Address or hostname of Cisco IMC, resolvable by Ansible control host. required: true - aliases: [ host, ip ] + aliases: [host, ip] type: str username: description: - - Username used to login to the switch. + - Username used to login to the switch. default: admin - aliases: [ user ] + aliases: [user] type: str password: description: - - The password to use for authentication. + - The password to use for authentication. default: password type: str path: description: - - Name of the absolute path of the filename that includes the body - of the http request being sent to the Cisco IMC REST API. - - Parameter C(path) is mutual exclusive with parameter C(content). - aliases: [ 'src', 'config_file' ] + - Name of the absolute path of the filename that includes the body of the http request being sent to the Cisco IMC REST + API. + - Parameter O(path) is mutual exclusive with parameter O(content). + aliases: ['src', 'config_file'] type: path content: description: - - When used instead of C(path), sets the content of the API requests directly. 
- - This may be convenient to template simple requests, for anything complex use the M(ansible.builtin.template) module. - - You can collate multiple IMC XML fragments and they will be processed sequentially in a single stream, - the Cisco IMC output is subsequently merged. - - Parameter C(content) is mutual exclusive with parameter C(path). + - When used instead of O(path), sets the content of the API requests directly. + - This may be convenient to template simple requests, for anything complex use the M(ansible.builtin.template) module. + - You can collate multiple IMC XML fragments and they are processed sequentially in a single stream, the Cisco IMC output + is subsequently merged. + - Parameter O(content) is mutual exclusive with parameter O(path). type: str protocol: description: - - Connection protocol to use. + - Connection protocol to use. default: https - choices: [ http, https ] + choices: [http, https] type: str timeout: description: - - The socket level timeout in seconds. - - This is the time that every single connection (every fragment) can spend. - If this C(timeout) is reached, the module will fail with a - C(Connection failure) indicating that C(The read operation timed out). + - The socket level timeout in seconds. + - This is the time that every single connection (every fragment) can spend. If this O(timeout) is reached, the module + fails with a C(Connection failure) indicating that C(The read operation timed out). default: 60 type: int validate_certs: description: - - If C(false), SSL certificates will not be validated. - - This should only set to C(false) used on personally controlled sites using self-signed certificates. + - If V(false), SSL certificates are not validated. + - This should only set to V(false) used on personally controlled sites using self-signed certificates. type: bool default: true notes: -- The XML fragments don't need an authentication cookie, this is injected by the module automatically. 
-- The Cisco IMC XML output is being translated to JSON using the Cobra convention. -- Any configConfMo change requested has a return status of 'modified', even if there was no actual change - from the previous configuration. As a result, this module will always report a change on subsequent runs. - In case this behaviour is fixed in a future update to Cisco IMC, this module will automatically adapt. -- If you get a C(Connection failure) related to C(The read operation timed out) increase the C(timeout) - parameter. Some XML fragments can take longer than the default timeout. -- More information about the IMC REST API is available from - U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html) -''' + - The XML fragments do not need an authentication cookie, this is injected by the module automatically. + - The Cisco IMC XML output is being translated to JSON using the Cobra convention. + - Any configConfMo change requested has a return status of C(modified), even if there was no actual change from the previous + configuration. As a result, this module always reports a change on subsequent runs. In case this behaviour is fixed in + a future update to Cisco IMC, this module is meant to automatically adapt. + - If you get a C(Connection failure) related to C(The read operation timed out) increase the O(timeout) parameter. Some + XML fragments can take longer than the default timeout. + - More information about the IMC REST API is available from + U(http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/c/sw/api/3_0/b_Cisco_IMC_api_301.html). +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Power down server community.general.imc_rest: hostname: '{{ imc_hostname }}' username: '{{ imc_username }}' password: '{{ imc_password }}' - validate_certs: false + validate_certs: false # only do this when you trust the network! 
content: | @@ -105,7 +108,7 @@ EXAMPLES = r''' hostname: '{{ imc_hostname }}' username: '{{ imc_username }}' password: '{{ imc_password }}' - validate_certs: false + validate_certs: false # only do this when you trust the network! timeout: 120 content: | @@ -130,7 +133,7 @@ EXAMPLES = r''' hostname: '{{ imc_hostname }}' username: '{{ imc_username }}' password: '{{ imc_password }}' - validate_certs: false + validate_certs: false # only do this when you trust the network! content: | @@ -148,7 +151,7 @@ EXAMPLES = r''' hostname: '{{ imc_host }}' username: '{{ imc_username }}' password: '{{ imc_password }}' - validate_certs: false + validate_certs: false # only do this when you trust the network! content: | @@ -160,11 +163,11 @@ EXAMPLES = r''' hostname: '{{ imc_host }}' username: '{{ imc_username }}' password: '{{ imc_password }}' - validate_certs: false + validate_certs: false # only do this when you trust the network! content: | - - - + + + delegate_to: localhost - name: Disable HTTP and increase session timeout to max value 10800 secs @@ -172,22 +175,22 @@ EXAMPLES = r''' hostname: '{{ imc_host }}' username: '{{ imc_username }}' password: '{{ imc_password }}' - validate_certs: false + validate_certs: false # only do this when you trust the network! timeout: 120 content: | - - - + + + - - - + + + delegate_to: localhost -''' +""" -RETURN = r''' +RETURN = r""" aaLogin: - description: Cisco IMC XML output for the login, translated to JSON using Cobra convention + description: Cisco IMC XML output for the login, translated to JSON using Cobra convention. returned: success type: dict sample: | @@ -201,27 +204,27 @@ aaLogin: "response": "yes" } configConfMo: - description: Cisco IMC XML output for any configConfMo XML fragments, translated to JSON using Cobra convention + description: Cisco IMC XML output for any configConfMo XML fragments, translated to JSON using Cobra convention. 
returned: success type: dict sample: | elapsed: - description: Elapsed time in seconds + description: Elapsed time in seconds. returned: always type: int sample: 31 response: - description: HTTP response message, including content length + description: HTTP response message, including content length. returned: always type: str sample: OK (729 bytes) status: - description: The HTTP response status code + description: The HTTP response status code. returned: always type: dict sample: 200 error: - description: Cisco IMC XML error output for last request, translated to JSON using Cobra convention + description: Cisco IMC XML error output for last request, translated to JSON using Cobra convention. returned: failed type: dict sample: | @@ -233,24 +236,24 @@ error: "response": "yes" } error_code: - description: Cisco IMC error code + description: Cisco IMC error code. returned: failed type: str sample: ERR-xml-parse-error error_text: - description: Cisco IMC error message + description: Cisco IMC error message. returned: failed type: str sample: | XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed. input: - description: RAW XML input sent to the Cisco IMC, causing the error + description: RAW XML input sent to the Cisco IMC, causing the error. returned: failed type: str sample: | output: - description: RAW XML output received from the Cisco IMC, with error details + description: RAW XML output received from the Cisco IMC, with error details. 
returned: failed type: str sample: > @@ -258,12 +261,12 @@ output: response="yes" errorCode="ERR-xml-parse-error" invocationResult="594" - errorDescr="XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed.\n"/> -''' + errorDescr="XML PARSING ERROR: Element 'computeRackUnit', attribute 'admin_Power': The attribute 'admin_Power' is not allowed.\n" /> +""" -import datetime import os import traceback +from itertools import zip_longest LXML_ETREE_IMP_ERR = None try: @@ -282,9 +285,12 @@ except ImportError: HAS_XMLJSON_COBRA = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib -from ansible.module_utils.six.moves import zip_longest from ansible.module_utils.urls import fetch_url +from ansible_collections.community.general.plugins.module_utils.datetime import ( + now, +) + def imc_response(module, rawoutput, rawinput=''): ''' Handle IMC returned data ''' @@ -313,8 +319,7 @@ def merge(one, two): ''' Merge two complex nested datastructures into one''' if isinstance(one, dict) and isinstance(two, dict): copy = dict(one) - # copy.update({key: merge(one.get(key, None), two[key]) for key in two}) - copy.update(dict((key, merge(one.get(key, None), two[key])) for key in two)) + copy.update({key: merge(one.get(key, None), two[key]) for key in two}) return copy elif isinstance(one, list) and isinstance(two, list): @@ -368,14 +373,14 @@ def main(): else: module.fail_json(msg='Cannot find/access path:\n%s' % path) - start = datetime.datetime.utcnow() + start = now() # Perform login first url = '%s://%s/nuova' % (protocol, hostname) data = '' % (username, password) resp, auth = fetch_url(module, url, data=data, method='POST', timeout=timeout) if resp is None or auth['status'] != 200: - result['elapsed'] = (datetime.datetime.utcnow() - start).seconds + result['elapsed'] = (now() - start).seconds module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % auth, **result) 
result.update(imc_response(module, resp.read())) @@ -408,7 +413,7 @@ def main(): # Perform actual request resp, info = fetch_url(module, url, data=data, method='POST', timeout=timeout) if resp is None or info['status'] != 200: - result['elapsed'] = (datetime.datetime.utcnow() - start).seconds + result['elapsed'] = (now() - start).seconds module.fail_json(msg='Task failed with error %(status)s: %(msg)s' % info, **result) # Merge results with previous results @@ -424,7 +429,7 @@ def main(): result['changed'] = ('modified' in results) # Report success - result['elapsed'] = (datetime.datetime.utcnow() - start).seconds + result['elapsed'] = (now() - start).seconds module.exit_json(**result) finally: logout(module, url, cookie, timeout) diff --git a/plugins/modules/imgadm.py b/plugins/modules/imgadm.py index 2357fffa30..da016f8597 100644 --- a/plugins/modules/imgadm.py +++ b/plugins/modules/imgadm.py @@ -1,66 +1,66 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2016, 2017 Jasper Lievisse Adriaanse # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: imgadm short_description: Manage SmartOS images description: - - Manage SmartOS virtual machine images through imgadm(1M) + - Manage SmartOS virtual machine images through imgadm(1M). author: Jasper Lievisse Adriaanse (@jasperla) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: - force: - required: false - type: bool - description: - - Force a given operation (where supported by imgadm(1M)). - pool: - required: false - default: zones - description: - - zpool to import to or delete images from. 
- type: str - source: - required: false - description: - - URI for the image source. - type: str - state: - required: true - choices: [ present, absent, deleted, imported, updated, vacuumed ] - description: - - State the object operated on should be in. C(imported) is an alias for - for C(present) and C(deleted) for C(absent). When set to C(vacuumed) - and C(uuid) to C(*), it will remove all unused images. - type: str + force: + required: false + type: bool + description: + - Force a given operation (where supported by imgadm(1M)). + pool: + required: false + default: zones + description: + - The zpool to import to or delete images from. + type: str + source: + required: false + description: + - URI for the image source. + type: str + state: + required: true + choices: [present, absent, deleted, imported, updated, vacuumed] + description: + - State the object operated on should be in. V(imported) is an alias for for V(present) and V(deleted) for V(absent). + When set to V(vacuumed) and O(uuid=*), it removes all unused images. + type: str - type: - required: false - choices: [ imgapi, docker, dsapi ] - default: imgapi - description: - - Type for image sources. - type: str + type: + required: false + choices: [imgapi, docker, dsapi] + default: imgapi + description: + - Type for image sources. + type: str - uuid: - required: false - description: - - Image UUID. Can either be a full UUID or C(*) for all images. - type: str + uuid: + required: false + description: + - Image UUID. Can either be a full UUID or V(*) for all images. + type: str +""" -requirements: - - python >= 2.6 -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Import an image community.general.imgadm: uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764' @@ -96,25 +96,25 @@ EXAMPLES = ''' community.general.imgadm: source: 'https://docker.io' state: absent -''' +""" -RETURN = ''' +RETURN = r""" source: - description: Source that is managed. - returned: When not managing an image. 
- type: str - sample: https://datasets.project-fifo.net + description: Source that is managed. + returned: When not managing an image. + type: str + sample: https://datasets.project-fifo.net uuid: - description: UUID for an image operated on. - returned: When not managing an image source. - type: str - sample: 70e3ae72-96b6-11e6-9056-9737fd4d0764 + description: UUID for an image operated on. + returned: When not managing an image source. + type: str + sample: 70e3ae72-96b6-11e6-9056-9737fd4d0764 state: - description: State of the target, after execution. - returned: success - type: str - sample: 'present' -''' + description: State of the target, after execution. + returned: success + type: str + sample: 'present' +""" import re @@ -135,7 +135,7 @@ class Imgadm(object): self.uuid = module.params['uuid'] # Since there are a number of (natural) aliases, prevent having to look - # them up everytime we operate on `state`. + # them up every time we operate on `state`. if self.params['state'] in ['present', 'imported', 'updated']: self.present = True else: @@ -156,9 +156,9 @@ class Imgadm(object): def update_images(self): if self.uuid == '*': - cmd = '{0} update'.format(self.cmd) + cmd = [self.cmd, 'update'] else: - cmd = '{0} update {1}'.format(self.cmd, self.uuid) + cmd = [self.cmd, 'update', self.uuid] (rc, stdout, stderr) = self.module.run_command(cmd) @@ -167,7 +167,7 @@ class Imgadm(object): # There is no feedback from imgadm(1M) to determine if anything # was actually changed. So treat this as an 'always-changes' operation. - # Note that 'imgadm -v' produces unparseable JSON... + # Note that 'imgadm -v' produces unparsable JSON... 
self.changed = True def manage_sources(self): @@ -175,13 +175,13 @@ class Imgadm(object): source = self.params['source'] imgtype = self.params['type'] - cmd = '{0} sources'.format(self.cmd) + cmd = [self.cmd, 'sources'] if force: - cmd += ' -f' + cmd = cmd + ['-f'] if self.present: - cmd = '{0} -a {1} -t {2}'.format(cmd, source, imgtype) + cmd = cmd + ['-a', source, '-t', imgtype] (rc, stdout, stderr) = self.module.run_command(cmd) if rc != 0: @@ -220,7 +220,7 @@ class Imgadm(object): if state == 'vacuumed': # Unconditionally pass '--force', otherwise we're prompted with 'y/N' - cmd = '{0} vacuum -f'.format(self.cmd) + cmd = [self.cmd, 'vacuum', '-f'] (rc, stdout, stderr) = self.module.run_command(cmd) @@ -232,8 +232,7 @@ class Imgadm(object): else: self.changed = True if self.present: - cmd = '{0} import -P {1} -q {2}'.format(self.cmd, pool, self.uuid) - + cmd = [self.cmd, 'import', '-P', pool, '-q'] + ([self.uuid] if self.uuid else []) (rc, stdout, stderr) = self.module.run_command(cmd) if rc != 0: @@ -251,8 +250,7 @@ class Imgadm(object): if re.match(regex, stdout.splitlines()[-1]): self.changed = True else: - cmd = '{0} delete -P {1} {2}'.format(self.cmd, pool, self.uuid) - + cmd = [self.cmd, 'delete', '-P', pool] + ([self.uuid] if self.uuid else []) (rc, stdout, stderr) = self.module.run_command(cmd) regex = '.*ImageNotInstalled.*' diff --git a/plugins/modules/infinity.py b/plugins/modules/infinity.py index 4b0e835209..7f568faa0d 100644 --- a/plugins/modules/infinity.py +++ b/plugins/modules/infinity.py @@ -1,20 +1,25 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017, # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: infinity short_description: Manage Infinity 
IPAM using Rest API description: - Manage Infinity IPAM using REST API. author: - Meirong Liu (@MeganLiu) +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: none + diff_mode: + support: none options: server_ip: description: @@ -34,10 +39,10 @@ options: required: true action: description: - - Action to perform + - Action to perform. type: str required: true - choices: [add_network, delete_network, get_network, get_network_id, release_ip, release_network, reserve_network, reserve_next_available_ip ] + choices: [add_network, delete_network, get_network, get_network_id, release_ip, release_network, reserve_network, reserve_next_available_ip] network_id: description: - Network ID. @@ -48,11 +53,11 @@ options: type: str network_address: description: - - Network address with CIDR format (e.g., 192.168.310.0). + - Network address with CIDR format (for example V(192.168.310.0)). type: str network_size: description: - - Network bitmask (e.g. 255.255.255.220) or CIDR format (e.g., /26). + - Network bitmask (for example V(255.255.255.220) or CIDR format V(/26)). type: str network_name: description: @@ -60,25 +65,24 @@ options: type: str network_location: description: - - The parent network id for a given network. + - The parent network ID for a given network. type: int default: -1 network_type: description: - - Network type defined by Infinity + - Network type defined by Infinity. type: str - choices: [ lan, shared_lan, supernet ] + choices: [lan, shared_lan, supernet] default: lan network_family: description: - - Network family defined by Infinity, e.g. IPv4, IPv6 and Dual stack + - Network family defined by Infinity, for example V(IPv4), V(IPv6) and V(Dual stack). 
type: str - choices: [ '4', '6', dual ] + choices: ['4', '6', dual] default: '4' -''' +""" -EXAMPLES = r''' ---- +EXAMPLES = r""" - hosts: localhost connection: local strategy: debug @@ -95,35 +99,48 @@ EXAMPLES = r''' network_id: 1201 network_size: /28 register: infinity -''' +""" -RETURN = r''' +RETURN = r""" network_id: - description: id for a given network - returned: success - type: str - sample: '1501' + description: ID for a given network. + returned: success + type: str + sample: '1501' ip_info: - description: when reserve next available ip address from a network, the ip address info ) is returned. - returned: success - type: str - sample: '{"address": "192.168.10.3", "hostname": "", "FQDN": "", "domainname": "", "id": 3229}' -network_info: - description: when reserving a LAN network from a Infinity supernet by providing network_size, the information about the reserved network is returned. - returned: success - type: str - sample: { - "network_address": "192.168.10.32/28", - "network_family": "4", - "network_id": 3102, - "network_size": null, - "description": null, - "network_location": "3085", - "ranges": { "id": 0, "name": null,"first_ip": null,"type": null,"last_ip": null}, - "network_type": "lan", - "network_name": "'reserve_new_ansible_network'" + description: + - When reserve next available IP address from a network, the IP address info is returned. + - Please note that the value is a B(string) containing JSON data. + returned: success + type: str + sample: >- + { + "address": "192.168.10.3", + "hostname": "", + "FQDN": "", + "domainname": "", + "id": 3229 } -''' +network_info: + description: + - When reserving a LAN network from a Infinity supernet by providing network_size, the information about the reserved + network is returned. + - Please note that the value is a B(string) containing JSON data. 
+ returned: success + type: str + sample: >- + { + "network_address": "192.168.10.32/28", + "network_family": "4", + "network_id": 3102, + "network_size": null, + "description": null, + "network_location": "3085", + "ranges": {"id": 0, "name": null, "first_ip": null, "type": null, "last_ip": null}, + "network_type": "lan", + "network_name": "'reserve_new_ansible_network'" + } +""" from ansible.module_utils.basic import AnsibleModule, json diff --git a/plugins/modules/influxdb_database.py b/plugins/modules/influxdb_database.py index 8ffbece606..600599ab0c 100644 --- a/plugins/modules/influxdb_database.py +++ b/plugins/modules/influxdb_database.py @@ -1,68 +1,69 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2016, Kamil Szczygiel # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: influxdb_database short_description: Manage InfluxDB databases description: - - Manage InfluxDB databases. + - Manage InfluxDB databases. author: "Kamil Szczygiel (@kamsz)" requirements: - - "python >= 2.6" - - "influxdb >= 0.9" - - requests + - "influxdb >= 0.9" + - requests +attributes: + check_mode: + support: full + diff_mode: + support: none options: - database_name: - description: - - Name of the database. - required: true - type: str - state: - description: - - Determines if the database should be created or destroyed. - choices: [ absent, present ] - default: present - type: str + database_name: + description: + - Name of the database. + required: true + type: str + state: + description: + - Determines if the database should be created or destroyed. 
+ choices: [absent, present] + default: present + type: str extends_documentation_fragment: -- community.general.influxdb + - community.general.influxdb + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # Example influxdb_database command from Ansible Playbooks - name: Create database community.general.influxdb_database: - hostname: "{{influxdb_ip_address}}" - database_name: "{{influxdb_database_name}}" + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" - name: Destroy database community.general.influxdb_database: - hostname: "{{influxdb_ip_address}}" - database_name: "{{influxdb_database_name}}" - state: absent + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" + state: absent - name: Create database using custom credentials community.general.influxdb_database: - hostname: "{{influxdb_ip_address}}" - username: "{{influxdb_username}}" - password: "{{influxdb_password}}" - database_name: "{{influxdb_database_name}}" - ssl: true - validate_certs: true -''' + hostname: "{{influxdb_ip_address}}" + username: "{{influxdb_username}}" + password: "{{influxdb_password}}" + database_name: "{{influxdb_database_name}}" + ssl: true + validate_certs: true +""" -RETURN = r''' +RETURN = r""" # only defaults -''' +""" try: import requests.exceptions diff --git a/plugins/modules/influxdb_query.py b/plugins/modules/influxdb_query.py index 14a65a60dc..1707d401f2 100644 --- a/plugins/modules/influxdb_query.py +++ b/plugins/modules/influxdb_query.py @@ -1,23 +1,24 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017, René Moser # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: influxdb_query 
short_description: Query data points from InfluxDB description: - Query data points from InfluxDB. author: "René Moser (@resmo)" requirements: - - "python >= 2.6" - "influxdb >= 0.9" +attributes: + check_mode: + support: full + diff_mode: + support: none options: query: description: @@ -30,11 +31,11 @@ options: required: true type: str extends_documentation_fragment: -- community.general.influxdb + - community.general.influxdb + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Query connections community.general.influxdb_query: hostname: "{{ influxdb_ip_address }}" @@ -52,17 +53,17 @@ EXAMPLES = r''' - name: Print results from the query ansible.builtin.debug: var: connection.query_results -''' +""" -RETURN = r''' +RETURN = r""" query_results: - description: Result from the query + description: Result from the query. returned: success type: list sample: - mean: 1245.5333333333333 time: "1970-01-01T00:00:00Z" -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native diff --git a/plugins/modules/influxdb_retention_policy.py b/plugins/modules/influxdb_retention_policy.py index 1b7d7eec9a..c1848a4694 100644 --- a/plugins/modules/influxdb_retention_policy.py +++ b/plugins/modules/influxdb_retention_policy.py @@ -1,141 +1,138 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2016, Kamil Szczygiel # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: influxdb_retention_policy short_description: Manage InfluxDB retention policies description: - - Manage InfluxDB retention policies. + - Manage InfluxDB retention policies. 
author: "Kamil Szczygiel (@kamsz)" requirements: - - "python >= 2.6" - - "influxdb >= 0.9" - - requests + - "influxdb >= 0.9" + - requests +attributes: + check_mode: + support: full + diff_mode: + support: none options: - database_name: - description: - - Name of the database. - required: true - type: str - policy_name: - description: - - Name of the retention policy. - required: true - type: str - state: - description: - - State of the retention policy. - choices: [ absent, present ] - default: present - type: str - version_added: 3.1.0 - duration: - description: - - Determines how long InfluxDB should keep the data. If specified, it - should be C(INF) or at least one hour. If not specified, C(INF) is - assumed. Supports complex duration expressions with multiple units. - - Required only if I(state) is set to C(present). - type: str - replication: - description: - - Determines how many independent copies of each point are stored in the cluster. - - Required only if I(state) is set to C(present). - type: int - default: - description: - - Sets the retention policy as default retention policy. - type: bool - default: false - shard_group_duration: - description: - - Determines the time range covered by a shard group. If specified it - must be at least one hour. If none, it's determined by InfluxDB by - the rentention policy's duration. Supports complex duration expressions - with multiple units. - type: str - version_added: '2.0.0' + database_name: + description: + - Name of the database. + required: true + type: str + policy_name: + description: + - Name of the retention policy. + required: true + type: str + state: + description: + - State of the retention policy. + choices: [absent, present] + default: present + type: str + version_added: 3.1.0 + duration: + description: + - Determines how long InfluxDB should keep the data. If specified, it should be V(INF) or at least one hour. If not + specified, V(INF) is assumed. 
 Supports complex duration expressions with multiple units.
+ - Required only if O(state) is set to V(present).
+ type: str
+ replication:
+ description:
+ - Determines how many independent copies of each point are stored in the cluster.
+ - Required only if O(state) is set to V(present).
+ type: int
+ default:
+ description:
+ - Sets the retention policy as default retention policy.
+ type: bool
+ default: false
+ shard_group_duration:
+ description:
+ - Determines the time range covered by a shard group. If specified it must be at least one hour. If not provided, it
+ is determined by InfluxDB by the retention policy's duration. Supports complex duration expressions with multiple
+ units.
+ type: str
+ version_added: '2.0.0'
extends_documentation_fragment:
-- community.general.influxdb
+ - community.general.influxdb
+ - community.general.attributes
+"""

-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
# Example influxdb_retention_policy command from Ansible Playbooks
- name: Create 1 hour retention policy
  community.general.influxdb_retention_policy:
- hostname: "{{ influxdb_ip_address }}"
- database_name: "{{ influxdb_database_name }}"
- policy_name: test
- duration: 1h
- replication: 1
- ssl: true
- validate_certs: true
- state: present
+ hostname: "{{ influxdb_ip_address }}"
+ database_name: "{{ influxdb_database_name }}"
+ policy_name: test
+ duration: 1h
+ replication: 1
+ ssl: true
+ validate_certs: true
+ state: present

- name: Create 1 day retention policy with 1 hour shard group duration
  community.general.influxdb_retention_policy:
- hostname: "{{ influxdb_ip_address }}"
- database_name: "{{ influxdb_database_name }}"
- policy_name: test
- duration: 1d
- replication: 1
- shard_group_duration: 1h
- state: present
+ hostname: "{{ influxdb_ip_address }}"
+ database_name: "{{ influxdb_database_name }}"
+ policy_name: test
+ duration: 1d
+ replication: 1
+ shard_group_duration: 1h
+ state: present

- name: Create 1 week retention policy with 1 day shard group duration
community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - duration: 1w - replication: 1 - shard_group_duration: 1d - state: present + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + policy_name: test + duration: 1w + replication: 1 + shard_group_duration: 1d + state: present - name: Create infinite retention policy with 1 week of shard group duration community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - duration: INF - replication: 1 - ssl: false - validate_certs: false - shard_group_duration: 1w - state: present + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + policy_name: test + duration: INF + replication: 1 + ssl: false + shard_group_duration: 1w + state: present - name: Create retention policy with complex durations community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - duration: 5d1h30m - replication: 1 - ssl: false - validate_certs: false - shard_group_duration: 1d10h30m - state: present + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + policy_name: test + duration: 5d1h30m + replication: 1 + ssl: false + shard_group_duration: 1d10h30m + state: present - name: Drop retention policy community.general.influxdb_retention_policy: - hostname: "{{ influxdb_ip_address }}" - database_name: "{{ influxdb_database_name }}" - policy_name: test - state: absent -''' + hostname: "{{ influxdb_ip_address }}" + database_name: "{{ influxdb_database_name }}" + policy_name: test + state: absent +""" -RETURN = r''' +RETURN = r""" # only defaults -''' +""" import re @@ -184,7 +181,7 @@ def parse_duration_literal(value, extended=False): lookup = (EXTENDED_DURATION_REGEX 
if extended else DURATION_REGEX).findall(value) for duration_literal in lookup: - filtered_literal = list(filter(None, duration_literal)) + filtered_literal = [_f for _f in duration_literal if _f] duration_val = float(filtered_literal[0]) duration += duration_val * DURATION_UNIT_NANOSECS[filtered_literal[1]] diff --git a/plugins/modules/influxdb_user.py b/plugins/modules/influxdb_user.py index 25bc2a95ce..b6351a0c27 100644 --- a/plugins/modules/influxdb_user.py +++ b/plugins/modules/influxdb_user.py @@ -1,25 +1,26 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017, Vitaliy Zhhuta # insipred by Kamil Szczygiel influxdb_database module # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function +from __future__ import annotations -__metaclass__ = type -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: influxdb_user short_description: Manage InfluxDB users description: - Manage InfluxDB users. author: "Vitaliy Zhhuta (@zhhuta)" requirements: - - "python >= 2.6" - "influxdb >= 0.9" +attributes: + check_mode: + support: full + diff_mode: + support: none options: user_name: description: @@ -34,29 +35,29 @@ options: admin: description: - Whether the user should be in the admin role or not. - - Since version 2.8, the role will also be updated. + - Since version 2.8, the role is also updated. default: false type: bool state: description: - State of the user. - choices: [ absent, present ] + choices: [absent, present] default: present type: str grants: description: - Privileges to grant to this user. - Takes a list of dicts containing the "database" and "privilege" keys. - - If this argument is not provided, the current grants will be left alone. - - If an empty list is provided, all grants for the user will be removed. + - If this argument is not provided, the current grants are left alone. 
+ - If an empty list is provided, all grants for the user are removed. type: list elements: dict extends_documentation_fragment: -- community.general.influxdb + - community.general.influxdb + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a user on localhost using default login credentials community.general.influxdb_user: user_name: john @@ -96,11 +97,9 @@ EXAMPLES = r''' login_username: "{{ influxdb_username }}" login_password: "{{ influxdb_password }}" state: absent -''' +""" -RETURN = r''' -#only defaults -''' +RETURN = r"""#""" import json @@ -168,8 +167,14 @@ def drop_user(module, client, user_name): def set_user_grants(module, client, user_name, grants): changed = False + current_grants = [] try: current_grants = client.get_list_privileges(user_name) + except influx.exceptions.InfluxDBClientError as e: + if not module.check_mode or 'user not found' not in e.content: + module.fail_json(msg=e.content) + + try: parsed_grants = [] # Fix privileges wording for i, v in enumerate(current_grants): @@ -210,7 +215,7 @@ def main(): argument_spec.update( state=dict(default='present', type='str', choices=['present', 'absent']), user_name=dict(required=True, type='str'), - user_password=dict(required=False, type='str', no_log=True), + user_password=dict(type='str', no_log=True), admin=dict(default='False', type='bool'), grants=dict(type='list', elements='dict'), ) diff --git a/plugins/modules/influxdb_write.py b/plugins/modules/influxdb_write.py index 68e722ae1c..d0348aca01 100644 --- a/plugins/modules/influxdb_write.py +++ b/plugins/modules/influxdb_write.py @@ -1,23 +1,24 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017, René Moser # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations 
-DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: influxdb_write short_description: Write data points into InfluxDB description: - Write data points into InfluxDB. author: "René Moser (@resmo)" requirements: - - "python >= 2.6" - "influxdb >= 0.9" +attributes: + check_mode: + support: none + diff_mode: + support: none options: data_points: description: @@ -31,35 +32,35 @@ options: required: true type: str extends_documentation_fragment: -- community.general.influxdb + - community.general.influxdb + - community.general.attributes +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Write points into database community.general.influxdb_write: - hostname: "{{influxdb_ip_address}}" - database_name: "{{influxdb_database_name}}" - data_points: - - measurement: connections - tags: - host: server01 - region: us-west - time: "{{ ansible_date_time.iso8601 }}" - fields: - value: 2000 - - measurement: connections - tags: - host: server02 - region: us-east - time: "{{ ansible_date_time.iso8601 }}" - fields: - value: 3000 -''' + hostname: "{{influxdb_ip_address}}" + database_name: "{{influxdb_database_name}}" + data_points: + - measurement: connections + tags: + host: server01 + region: us-west + time: "{{ ansible_date_time.iso8601 }}" + fields: + value: 2000 + - measurement: connections + tags: + host: server02 + region: us-east + time: "{{ ansible_date_time.iso8601 }}" + fields: + value: 3000 +""" -RETURN = r''' +RETURN = r""" # only defaults -''' +""" from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native diff --git a/plugins/modules/ini_file.py b/plugins/modules/ini_file.py index ee4ad62b72..27b55c3bf4 100644 --- a/plugins/modules/ini_file.py +++ b/plugins/modules/ini_file.py @@ -1,89 +1,113 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2012, Jan-Piet Mens # Copyright (c) 2015, Ales Nosek # Copyright (c) 2017, Ansible Project +# Copyright (c) 2023, Ansible Project # GNU General Public 
License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: ini_file short_description: Tweak settings in INI files -extends_documentation_fragment: files +extends_documentation_fragment: + - files + - community.general.attributes description: - - Manage (add, remove, change) individual settings in an INI-style file without having - to manage the file as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble). - - Adds missing sections if they don't exist. - - Before Ansible 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file. - - Since Ansible 2.3, this module adds missing ending newlines to files to keep in line with the POSIX standard, even when - no other modifications need to be applied. + - Manage (add, remove, change) individual settings in an INI-style file without having to manage the file as a whole with, + say, M(ansible.builtin.template) or M(ansible.builtin.assemble). + - Adds missing sections if they do not exist. + - This module adds missing ending newlines to files to keep in line with the POSIX standard, even when no other modifications + need to be applied. +attributes: + check_mode: + support: full + diff_mode: + support: full options: path: description: - Path to the INI-style file; this file is created if required. - - Before Ansible 2.3 this option was only usable as I(dest). type: path required: true - aliases: [ dest ] + aliases: [dest] section: description: - - Section name in INI file. This is added if I(state=present) automatically when - a single value is being set. - - If left empty or set to C(null), the I(option) will be placed before the first I(section). 
- - Using C(null) is also required if the config format does not support sections. + - Section name in INI file. This is added if O(state=present) automatically when a single value is being set. + - If being omitted, the O(option) is placed before the first O(section). + - Omitting O(section) is also required if the config format does not support sections. type: str - required: true + section_has_values: + type: list + elements: dict + required: false + suboptions: + option: + type: str + description: Matching O(section) must contain this option. + required: true + value: + type: str + description: Matching O(section_has_values[].option) must have this specific value. + values: + description: + - The string value to be associated with an O(section_has_values[].option). + - Mutually exclusive with O(section_has_values[].value). + - O(section_has_values[].value=v) is equivalent to O(section_has_values[].values=[v]). + type: list + elements: str + description: + - Among possibly multiple sections of the same name, select the first one that contains matching options and values. + - With O(state=present), if a suitable section is not found, a new section is added, including the required options. + - With O(state=absent), at most one O(section) is removed if it contains the values. + version_added: 8.6.0 option: description: - - If set (required for changing a I(value)), this is the name of the option. - - May be omitted if adding/removing a whole I(section). + - If set (required for changing a O(value)), this is the name of the option. + - May be omitted if adding/removing a whole O(section). type: str value: description: - - The string value to be associated with an I(option). - - May be omitted when removing an I(option). - - Mutually exclusive with I(values). - - I(value=v) is equivalent to I(values=[v]). + - The string value to be associated with an O(option). + - May be omitted when removing an O(option). + - Mutually exclusive with O(values). 
+ - O(value=v) is equivalent to O(values=[v]). type: str values: description: - - The string value to be associated with an I(option). - - May be omitted when removing an I(option). - - Mutually exclusive with I(value). - - I(value=v) is equivalent to I(values=[v]). + - The string value to be associated with an O(option). + - May be omitted when removing an O(option). + - Mutually exclusive with O(value). + - O(value=v) is equivalent to O(values=[v]). type: list elements: str version_added: 3.6.0 backup: description: - - Create a backup file including the timestamp information so you can get - the original file back if you somehow clobbered it incorrectly. + - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered + it incorrectly. type: bool default: false state: description: - - If set to C(absent) and I(exclusive) set to C(true) all matching I(option) lines are removed. - - If set to C(absent) and I(exclusive) set to C(false) the specified I(option=value) lines are removed, - but the other I(option)s with the same name are not touched. - - If set to C(present) and I(exclusive) set to C(false) the specified I(option=values) lines are added, - but the other I(option)s with the same name are not touched. - - If set to C(present) and I(exclusive) set to C(true) all given I(option=values) lines will be - added and the other I(option)s with the same name are removed. + - If set to V(absent) and O(exclusive) set to V(true) all matching O(option) lines are removed. + - If set to V(absent) and O(exclusive) set to V(false) the specified O(option=value) lines are removed, but the other + O(option)s with the same name are not touched. + - If set to V(present) and O(exclusive) set to V(false) the specified O(option=values) lines are added, but the other + O(option)s with the same name are not touched. 
+ - If set to V(present) and O(exclusive) set to V(true) all given O(option=values) lines are added and the other O(option)s + with the same name are removed. type: str - choices: [ absent, present ] + choices: [absent, present] default: present exclusive: description: - - If set to C(true) (default), all matching I(option) lines are removed when I(state=absent), - or replaced when I(state=present). - - If set to C(false), only the specified I(value(s)) are added when I(state=present), - or removed when I(state=absent), and existing ones are not modified. + - If set to V(true) (default), all matching O(option) lines are removed when O(state=absent), or replaced when O(state=present). + - If set to V(false), only the specified O(value)/O(values) are added when O(state=present), or removed when O(state=absent), + and existing ones are not modified. type: bool default: true version_added: 3.6.0 @@ -92,10 +116,16 @@ options: - Do not insert spaces before and after '=' symbol. type: bool default: false + ignore_spaces: + description: + - Do not change a line if doing so would only add or remove spaces before or after the V(=) symbol. + type: bool + default: false + version_added: 7.5.0 create: description: - - If set to C(false), the module will fail if the file does not already exist. - - By default it will create the file if it is missing. + - If set to V(false), the module fails if the file does not already exist. + - By default it creates the file if it is missing. type: bool default: true allow_no_value: @@ -103,17 +133,30 @@ options: - Allow option without value and without '=' symbol. type: bool default: false + modify_inactive_option: + description: + - By default the module replaces a commented line that matches the given option. + - Set this option to V(false) to avoid this. This is useful when you want to keep commented example C(key=value) pairs + for documentation purposes. 
+ type: bool + default: true + version_added: 8.0.0 + follow: + description: + - This flag indicates that filesystem links, if they exist, should be followed. + - O(follow=true) can modify O(path) when combined with parameters such as O(mode). + type: bool + default: false + version_added: 7.1.0 notes: - - While it is possible to add an I(option) without specifying a I(value), this makes no sense. - - As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well. - - As of community.general 3.2.0, UTF-8 BOM markers are discarded when reading files. + - While it is possible to add an O(option) without specifying a O(value), this makes no sense. + - As of community.general 3.2.0, UTF-8 BOM markers are discarded when reading files. author: - - Jan-Piet Mens (@jpmens) - - Ales Nosek (@noseka1) -''' + - Jan-Piet Mens (@jpmens) + - Ales Nosek (@noseka1) +""" -EXAMPLES = r''' -# Before Ansible 2.3, option 'dest' was used instead of 'path' +EXAMPLES = r""" - name: Ensure "fav=lemonade is in section "[drinks]" in specified file community.general.ini_file: path: /etc/conf @@ -151,7 +194,65 @@ EXAMPLES = r''' - pepsi mode: '0600' state: present -''' + +- name: Add "beverage=lemon juice" outside a section in specified file + community.general.ini_file: + path: /etc/conf + option: beverage + value: lemon juice + state: present + +- name: Remove the peer configuration for 10.128.0.11/32 + community.general.ini_file: + path: /etc/wireguard/wg0.conf + section: Peer + section_has_values: + - option: AllowedIps + value: 10.128.0.11/32 + mode: '0600' + state: absent + +- name: Add "beverage=lemon juice" outside a section in specified file + community.general.ini_file: + path: /etc/conf + option: beverage + value: lemon juice + state: present + +- name: Update the public key for peer 10.128.0.12/32 + community.general.ini_file: + path: /etc/wireguard/wg0.conf + section: Peer + section_has_values: + - option: AllowedIps + value: 
10.128.0.12/32 + option: PublicKey + value: xxxxxxxxxxxxxxxxxxxx + mode: '0600' + state: present + +- name: Remove the peer configuration for 10.128.0.11/32 + community.general.ini_file: + path: /etc/wireguard/wg0.conf + section: Peer + section_has_values: + - option: AllowedIps + value: 10.4.0.11/32 + mode: '0600' + state: absent + +- name: Update the public key for peer 10.128.0.12/32 + community.general.ini_file: + path: /etc/wireguard/wg0.conf + section: Peer + section_has_values: + - option: AllowedIps + value: 10.4.0.12/32 + option: PublicKey + value: xxxxxxxxxxxxxxxxxxxx + mode: '0600' + state: present +""" import io import os @@ -165,27 +266,47 @@ from ansible.module_utils.common.text.converters import to_bytes, to_text def match_opt(option, line): option = re.escape(option) - return re.match('[#;]?( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line) + return re.match('( |\t)*([#;]?)( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line) def match_active_opt(option, line): option = re.escape(option) - return re.match('( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line) + return re.match('()()( |\t)*(%s)( |\t)*(=|$)( |\t)*(.*)' % option, line) -def update_section_line(changed, section_lines, index, changed_lines, newline, msg): - option_changed = section_lines[index] != newline +def update_section_line(option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg): + option_changed = None + if ignore_spaces: + old_match = match_opt(option, section_lines[index]) + if not old_match.group(2): + new_match = match_opt(option, newline) + option_changed = old_match.group(8) != new_match.group(8) + if option_changed is None: + option_changed = section_lines[index] != newline + if option_changed: + section_lines[index] = newline changed = changed or option_changed if option_changed: msg = 'option changed' - section_lines[index] = newline changed_lines[index] = 1 return (changed, msg) -def do_ini(module, filename, section=None, option=None, values=None, 
+def check_section_has_values(section_has_values, section_lines): + if section_has_values is not None: + for condition in section_has_values: + for line in section_lines: + match = match_opt(condition["option"], line) + if match and (len(condition["values"]) == 0 or match.group(8) in condition["values"]): + break + else: + return False + return True + + +def do_ini(module, filename, section=None, section_has_values=None, option=None, values=None, state='present', exclusive=True, backup=False, no_extra_spaces=False, - create=True, allow_no_value=False): + ignore_spaces=False, create=True, allow_no_value=False, modify_inactive_option=True, follow=False): if section is not None: section = to_text(section) @@ -204,42 +325,47 @@ def do_ini(module, filename, section=None, option=None, values=None, after_header='%s (content)' % filename, ) - if not os.path.exists(filename): + if follow and os.path.islink(filename): + target_filename = os.path.realpath(filename) + else: + target_filename = filename + + if not os.path.exists(target_filename): if not create: - module.fail_json(rc=257, msg='Destination %s does not exist!' % filename) - destpath = os.path.dirname(filename) + module.fail_json(rc=257, msg='Destination %s does not exist!' 
% target_filename) + destpath = os.path.dirname(target_filename) if not os.path.exists(destpath) and not module.check_mode: os.makedirs(destpath) ini_lines = [] else: - with io.open(filename, 'r', encoding="utf-8-sig") as ini_file: + with io.open(target_filename, 'r', encoding="utf-8-sig") as ini_file: ini_lines = [to_text(line) for line in ini_file.readlines()] if module._diff: - diff['before'] = u''.join(ini_lines) + diff['before'] = ''.join(ini_lines) changed = False # ini file could be empty if not ini_lines: - ini_lines.append(u'\n') + ini_lines.append('\n') # last line of file may not contain a trailing newline - if ini_lines[-1] == u"" or ini_lines[-1][-1] != u'\n': - ini_lines[-1] += u'\n' + if ini_lines[-1] == "" or ini_lines[-1][-1] != '\n': + ini_lines[-1] += '\n' changed = True # append fake section lines to simplify the logic # At top: # Fake random section to do not match any other in the file # Using commit hash as fake section name - fake_section_name = u"ad01e11446efb704fcdbdb21f2c43757423d91c5" + fake_section_name = "ad01e11446efb704fcdbdb21f2c43757423d91c5" # Insert it at the beginning - ini_lines.insert(0, u'[%s]' % fake_section_name) + ini_lines.insert(0, '[%s]' % fake_section_name) # At bottom: - ini_lines.append(u'[') + ini_lines.append('[') # If no section is defined, fake section is used if not section: @@ -249,9 +375,9 @@ def do_ini(module, filename, section=None, option=None, values=None, section_start = section_end = 0 msg = 'OK' if no_extra_spaces: - assignment_format = u'%s=%s\n' + assignment_format = '%s=%s\n' else: - assignment_format = u'%s = %s\n' + assignment_format = '%s = %s\n' option_no_value_present = False @@ -260,15 +386,25 @@ def do_ini(module, filename, section=None, option=None, values=None, before = after = [] section_lines = [] + section_pattern = re.compile(to_text(r'^\[\s*%s\s*]' % re.escape(section.strip()))) + for index, line in enumerate(ini_lines): - # find start and end of section - if line.startswith(u'[%s]' % 
section): - within_section = True - section_start = index - elif line.startswith(u'['): - if within_section: + # end of section: + if within_section and line.startswith('['): + if check_section_has_values( + section_has_values, ini_lines[section_start:index] + ): section_end = index break + else: + # look for another section + within_section = False + section_start = section_end = 0 + + # find start and end of section + if section_pattern.match(line): + within_section = True + section_start = index before = ini_lines[0:section_start] section_lines = ini_lines[section_start:section_end] @@ -277,6 +413,12 @@ def do_ini(module, filename, section=None, option=None, values=None, # Keep track of changed section_lines changed_lines = [0] * len(section_lines) + # Determine whether to consider using commented out/inactive options or only active ones + if modify_inactive_option: + match_function = match_opt + else: + match_function = match_active_opt + # handling multiple instances of option=value when state is 'present' with/without exclusive is a bit complex # # 1. 
edit all lines where we have a option=value pair with a matching value in values[] @@ -286,23 +428,23 @@ def do_ini(module, filename, section=None, option=None, values=None, if state == 'present' and option: for index, line in enumerate(section_lines): - if match_opt(option, line): - match = match_opt(option, line) - if values and match.group(6) in values: - matched_value = match.group(6) + if match_function(option, line): + match = match_function(option, line) + if values and match.group(8) in values: + matched_value = match.group(8) if not matched_value and allow_no_value: # replace existing option with no value line(s) - newline = u'%s\n' % option + newline = '%s\n' % option option_no_value_present = True else: # replace existing option=value line(s) newline = assignment_format % (option, matched_value) - (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg) + (changed, msg) = update_section_line(option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg) values.remove(matched_value) elif not values and allow_no_value: # replace existing option with no value line(s) - newline = u'%s\n' % option - (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg) + newline = '%s\n' % option + (changed, msg) = update_section_line(option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg) option_no_value_present = True break @@ -310,14 +452,14 @@ def do_ini(module, filename, section=None, option=None, values=None, # override option with no value to option with value if not allow_no_value if len(values) > 0: for index, line in enumerate(section_lines): - if not changed_lines[index] and match_active_opt(option, line): + if not changed_lines[index] and match_function(option, line): newline = assignment_format % (option, values.pop(0)) - (changed, msg) = update_section_line(changed, section_lines, index, changed_lines, newline, msg) + (changed, msg) = 
update_section_line(option, changed, section_lines, index, changed_lines, ignore_spaces, newline, msg) if len(values) == 0: break # remove all remaining option occurrences from the rest of the section for index in range(len(section_lines) - 1, 0, -1): - if not changed_lines[index] and match_active_opt(option, section_lines[index]): + if not changed_lines[index] and match_function(option, section_lines[index]): del section_lines[index] del changed_lines[index] changed = True @@ -340,12 +482,12 @@ def do_ini(module, filename, section=None, option=None, values=None, changed = True elif element is None and allow_no_value: # insert option with no value line - section_lines.insert(index, u'%s\n' % option) + section_lines.insert(index, '%s\n' % option) msg = 'option added' changed = True elif option and not values and allow_no_value and not option_no_value_present: # insert option with no value line(s) - section_lines.insert(index, u'%s\n' % option) + section_lines.insert(index, '%s\n' % option) msg = 'option added' changed = True break @@ -361,7 +503,7 @@ def do_ini(module, filename, section=None, option=None, values=None, section_lines = new_section_lines elif not exclusive and len(values) > 0: # delete specified option=value line(s) - new_section_lines = [i for i in section_lines if not (match_active_opt(option, i) and match_active_opt(option, i).group(6) in values)] + new_section_lines = [i for i in section_lines if not (match_active_opt(option, i) and match_active_opt(option, i).group(8) in values)] if section_lines != new_section_lines: changed = True msg = 'option changed' @@ -381,24 +523,36 @@ def do_ini(module, filename, section=None, option=None, values=None, del ini_lines[-1:] if not within_section and state == 'present': - ini_lines.append(u'[%s]\n' % section) + ini_lines.append('[%s]\n' % section) msg = 'section and option added' + if section_has_values: + for condition in section_has_values: + if condition['option'] != option: + if len(condition['values']) > 
0: + for value in condition['values']: + ini_lines.append(assignment_format % (condition['option'], value)) + elif allow_no_value: + ini_lines.append('%s\n' % condition['option']) + elif not exclusive: + for value in condition['values']: + if value not in values: + values.append(value) if option and values: for value in values: ini_lines.append(assignment_format % (option, value)) elif option and not values and allow_no_value: - ini_lines.append(u'%s\n' % option) + ini_lines.append('%s\n' % option) else: msg = 'only section added' changed = True if module._diff: - diff['after'] = u''.join(ini_lines) + diff['after'] = ''.join(ini_lines) backup_file = None if changed and not module.check_mode: if backup: - backup_file = module.backup_local(filename) + backup_file = module.backup_local(target_filename) encoded_ini_lines = [to_bytes(line) for line in ini_lines] try: @@ -410,10 +564,10 @@ def do_ini(module, filename, section=None, option=None, values=None, module.fail_json(msg="Unable to create temporary file %s", traceback=traceback.format_exc()) try: - module.atomic_move(tmpfile, filename) + module.atomic_move(tmpfile, os.path.abspath(target_filename)) except IOError: module.ansible.fail_json(msg='Unable to move temporary \ - file %s to %s, IOError' % (tmpfile, filename), traceback=traceback.format_exc()) + file %s to %s, IOError' % (tmpfile, target_filename), traceback=traceback.format_exc()) return (changed, backup_file, diff, msg) @@ -423,7 +577,12 @@ def main(): module = AnsibleModule( argument_spec=dict( path=dict(type='path', required=True, aliases=['dest']), - section=dict(type='str', required=True), + section=dict(type='str'), + section_has_values=dict(type='list', elements='dict', options=dict( + option=dict(type='str', required=True), + value=dict(type='str'), + values=dict(type='list', elements='str') + ), mutually_exclusive=[['value', 'values']]), option=dict(type='str'), value=dict(type='str'), values=dict(type='list', elements='str'), @@ -431,8 +590,11 
@@ def main(): state=dict(type='str', default='present', choices=['absent', 'present']), exclusive=dict(type='bool', default=True), no_extra_spaces=dict(type='bool', default=False), + ignore_spaces=dict(type='bool', default=False), allow_no_value=dict(type='bool', default=False), - create=dict(type='bool', default=True) + modify_inactive_option=dict(type='bool', default=True), + create=dict(type='bool', default=True), + follow=dict(type='bool', default=False) ), mutually_exclusive=[ ['value', 'values'] @@ -443,6 +605,7 @@ def main(): path = module.params['path'] section = module.params['section'] + section_has_values = module.params['section_has_values'] option = module.params['option'] value = module.params['value'] values = module.params['values'] @@ -450,8 +613,11 @@ def main(): exclusive = module.params['exclusive'] backup = module.params['backup'] no_extra_spaces = module.params['no_extra_spaces'] + ignore_spaces = module.params['ignore_spaces'] allow_no_value = module.params['allow_no_value'] + modify_inactive_option = module.params['modify_inactive_option'] create = module.params['create'] + follow = module.params['follow'] if state == 'present' and not allow_no_value and value is None and not values: module.fail_json(msg="Parameter 'value(s)' must be defined if state=present and allow_no_value=False.") @@ -461,7 +627,17 @@ def main(): elif values is None: values = [] - (changed, backup_file, diff, msg) = do_ini(module, path, section, option, values, state, exclusive, backup, no_extra_spaces, create, allow_no_value) + if section_has_values: + for condition in section_has_values: + if condition['value'] is not None: + condition['values'] = [condition['value']] + elif condition['values'] is None: + condition['values'] = [] +# raise Exception("section_has_values: {}".format(section_has_values)) + + (changed, backup_file, diff, msg) = do_ini( + module, path, section, section_has_values, option, values, state, exclusive, backup, + no_extra_spaces, ignore_spaces, 
create, allow_no_value, modify_inactive_option, follow) if not module.check_mode and os.path.exists(path): file_args = module.load_file_common_arguments(module.params) diff --git a/plugins/modules/installp.py b/plugins/modules/installp.py index f1421bc390..57f70db687 100644 --- a/plugins/modules/installp.py +++ b/plugins/modules/installp.py @@ -1,50 +1,54 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # Copyright (c) 2017, Kairo Araujo # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = r''' ---- +DOCUMENTATION = r""" module: installp author: -- Kairo Araujo (@kairoaraujo) + - Kairo Araujo (@kairoaraujo) short_description: Manage packages on AIX description: - - Manage packages using 'installp' on AIX + - Manage packages using 'installp' on AIX. +extends_documentation_fragment: + - community.general.attributes +attributes: + check_mode: + support: full + diff_mode: + support: none options: accept_license: description: - - Whether to accept the license for the package(s). + - Whether to accept the license for the package(s). type: bool default: false name: description: - - One or more packages to install or remove. - - Use C(all) to install all packages available on informed C(repository_path). + - One or more packages to install or remove. + - Use V(all) to install all packages available on informed O(repository_path). type: list elements: str required: true - aliases: [ pkg ] + aliases: [pkg] repository_path: description: - - Path with AIX packages (required to install). + - Path with AIX packages (required to install). type: path state: description: - - Whether the package needs to be present on or absent from the system. + - Whether the package needs to be present on or absent from the system. 
type: str - choices: [ absent, present ] + choices: [absent, present] default: present notes: -- If the package is already installed, even the package/fileset is new, the module will not install it. -''' + - If the package is already installed, even the package/fileset is new, the module does not install it. +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Install package foo community.general.installp: name: foo @@ -77,9 +81,9 @@ EXAMPLES = r''' community.general.installp: name: bos.sysmgt.nim.master state: absent -''' +""" -RETURN = r''' # ''' +RETURN = r""" # """ import os import re @@ -99,7 +103,7 @@ def _check_new_pkg(module, package, repository_path): if os.path.isdir(repository_path): installp_cmd = module.get_bin_path('installp', True) - rc, package_result, err = module.run_command("%s -l -MR -d %s" % (installp_cmd, repository_path)) + rc, package_result, err = module.run_command([installp_cmd, "-l", "-MR", "-d", repository_path]) if rc != 0: module.fail_json(msg="Failed to run installp.", rc=rc, err=err) @@ -126,7 +130,7 @@ def _check_new_pkg(module, package, repository_path): def _check_installed_pkg(module, package, repository_path): """ Check the package on AIX. - It verifies if the package is installed and informations + It verifies if the package is installed and information :param module: Ansible module parameters spec. :param package: Package/fileset name. 
@@ -135,7 +139,7 @@ def _check_installed_pkg(module, package, repository_path): """ lslpp_cmd = module.get_bin_path('lslpp', True) - rc, lslpp_result, err = module.run_command("%s -lcq %s*" % (lslpp_cmd, package)) + rc, lslpp_result, err = module.run_command([lslpp_cmd, "-lcq", "%s*" % (package, )]) if rc == 1: package_state = ' '.join(err.split()[-2:]) @@ -166,7 +170,7 @@ def remove(module, installp_cmd, packages): if pkg_check: if not module.check_mode: - rc, remove_out, err = module.run_command("%s -u %s" % (installp_cmd, package)) + rc, remove_out, err = module.run_command([installp_cmd, "-u", package]) if rc != 0: module.fail_json(msg="Failed to run installp.", rc=rc, err=err) remove_count += 1 @@ -195,8 +199,8 @@ def install(module, installp_cmd, packages, repository_path, accept_license): already_installed_pkgs = {} accept_license_param = { - True: '-Y', - False: '', + True: ['-Y'], + False: [], } # Validate if package exists on repository path. @@ -223,7 +227,8 @@ def install(module, installp_cmd, packages, repository_path, accept_license): else: if not module.check_mode: - rc, out, err = module.run_command("%s -a %s -X -d %s %s" % (installp_cmd, accept_license_param[accept_license], repository_path, package)) + rc, out, err = module.run_command( + [installp_cmd, "-a"] + accept_license_param[accept_license] + ["-X", "-d", repository_path, package]) if rc != 0: module.fail_json(msg="Failed to run installp", rc=rc, err=err) installed_pkgs.append(package) diff --git a/plugins/modules/interfaces_file.py b/plugins/modules/interfaces_file.py index fe4223a12f..c7038d1008 100644 --- a/plugins/modules/interfaces_file.py +++ b/plugins/modules/interfaces_file.py @@ -1,135 +1,142 @@ #!/usr/bin/python -# -*- coding: utf-8 -*- # # Copyright (c) 2016, Roman Belyakovsky # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) # SPDX-License-Identifier: GPL-3.0-or-later -from __future__ import absolute_import, 
division, print_function -__metaclass__ = type +from __future__ import annotations -DOCUMENTATION = ''' ---- +DOCUMENTATION = r""" module: interfaces_file -short_description: Tweak settings in /etc/network/interfaces files -extends_documentation_fragment: files +short_description: Tweak settings in C(/etc/network/interfaces) files +extends_documentation_fragment: + - ansible.builtin.files + - community.general.attributes description: - - Manage (add, remove, change) individual interface options in an interfaces-style file without having - to manage the file as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble). Interface has to be presented in a file. - - Read information about interfaces from interfaces-styled files + - Manage (add, remove, change) individual interface options in an interfaces-style file without having to manage the file + as a whole with, say, M(ansible.builtin.template) or M(ansible.builtin.assemble). Interface has to be presented in a file. + - Read information about interfaces from interfaces-styled files. +attributes: + check_mode: + support: full + diff_mode: + support: none options: dest: type: path description: - - Path to the interfaces file + - Path to the interfaces file. default: /etc/network/interfaces iface: type: str description: - - Name of the interface, required for value changes or option remove + - Name of the interface, required for value changes or option remove. address_family: type: str description: - - Address family of the interface, useful if same interface name is used for both inet and inet6 + - Address family of the interface, useful if same interface name is used for both V(inet) and V(inet6). option: type: str description: - - Name of the option, required for value changes or option remove + - Name of the option, required for value changes or option remove. value: type: str description: - - If I(option) is not presented for the I(interface) and I(state) is C(present) option will be added. 
- If I(option) already exists and is not C(pre-up), C(up), C(post-up) or C(down), it's value will be updated. - C(pre-up), C(up), C(post-up) and C(down) options can't be updated, only adding new options, removing existing - ones or cleaning the whole option set are supported + - If O(option) is not presented for the O(iface) and O(state) is V(present), then O(option) is added. If O(option) already + exists and is not V(pre-up), V(up), V(post-up) or V(down), its value is updated. V(pre-up), V(up), V(post-up) and + V(down) options cannot be updated, only adding new options, removing existing ones or cleaning the whole option set + are supported. backup: description: - - Create a backup file including the timestamp information so you can get - the original file back if you somehow clobbered it incorrectly. + - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered + it incorrectly. type: bool default: false state: type: str description: - - If set to C(absent) the option or section will be removed if present instead of created. + - If set to V(absent) the option or section is removed if present instead of created. default: "present" - choices: [ "present", "absent" ] + choices: ["present", "absent"] notes: - - If option is defined multiple times last one will be updated but all will be deleted in case of an absent state + - If option is defined multiple times last one is updated but all others are deleted in case of an O(state=absent). requirements: [] author: "Roman Belyakovsky (@hryamzik)" -''' +""" -RETURN = ''' +RETURN = r""" dest: - description: destination file/path - returned: success - type: str - sample: "/etc/network/interfaces" + description: Destination file/path. 
+ returned: success + type: str + sample: "/etc/network/interfaces" ifaces: - description: interfaces dictionary - returned: success - type: complex - contains: - ifaces: - description: interface dictionary - returned: success - type: dict - contains: - eth0: - description: Name of the interface - returned: success - type: dict - contains: - address_family: - description: interface address family - returned: success - type: str - sample: "inet" - method: - description: interface method - returned: success - type: str - sample: "manual" - mtu: - description: other options, all values returned as strings - returned: success - type: str - sample: "1500" - pre-up: - description: list of C(pre-up) scripts - returned: success - type: list - sample: - - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" - - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" - up: - description: list of C(up) scripts - returned: success - type: list - sample: - - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" - - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" - post-up: - description: list of C(post-up) scripts - returned: success - type: list - sample: - - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" - - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" - down: - description: list of C(down) scripts - returned: success - type: list - sample: - - "route del -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" - - "route del -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" -... -''' + description: Interfaces dictionary. + returned: success + type: dict + contains: + ifaces: + description: Interface dictionary. + returned: success + type: dict + contains: + eth0: + description: Name of the interface. + returned: success + type: dict + contains: + address_family: + description: Interface address family. + returned: success + type: str + sample: "inet" + method: + description: Interface method. 
+ returned: success + type: str + sample: "manual" + mtu: + description: Other options, all values returned as strings. + returned: success + type: str + sample: "1500" + pre-up: + description: List of C(pre-up) scripts. + returned: success + type: list + elements: str + sample: + - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" + - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" + up: + description: List of C(up) scripts. + returned: success + type: list + elements: str + sample: + - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" + - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" + post-up: + description: List of C(post-up) scripts. + returned: success + type: list + elements: str + sample: + - "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" + - "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" + down: + description: List of C(down) scripts. + returned: success + type: list + elements: str + sample: + - "route del -net 10.10.10.0/24 gw 10.10.10.1 dev eth1" + - "route del -net 10.10.11.0/24 gw 10.10.11.1 dev eth2" +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Set eth1 mtu configuration value to 8000 community.general.interfaces_file: dest: /etc/network/interfaces.d/eth1.cfg @@ -139,7 +146,7 @@ EXAMPLES = ''' backup: true state: present register: eth1_cfg -''' +""" import os import re @@ -149,20 +156,22 @@ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_bytes -def line_dict(line): +def lineDict(line): return {'line': line, 'line_type': 'unknown'} -def make_option_dict(line, iface, option, value, address_family): +def optionDict(line, iface, option, value, address_family): return {'line': line, 'iface': iface, 'option': option, 'value': value, 'line_type': 'option', 'address_family': address_family} -def get_option_value(line): - patt = re.compile(r'^\s+(?P